source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ 0e754fac

Last change on this file since 0e754fac was 0e754fac, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 21, 2016 at 12:41:19 PM

score: Delete unused scheduler ask for help X op

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods, which were initially static in
 * schedulerprioritysmp.c, are shared with this scheduler. They are now
 * public so they can be reused here:
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

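/*
 * Added commentary (not from the original sources): both insert-order
 * wrappers below tolerate a NULL "next" node. The affinity-aware
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled() defined later in
 * this file may return NULL when no scheduled node can be displaced, and the
 * generic SMP enqueue path hands its result to the order function, so
 * treating NULL as "do not insert before" presumably keeps that comparison
 * safe.
 */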
static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_node( thread );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the scheduler node information for
 * this scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  _Scheduler_priority_SMP_Node_initialize( scheduler, node, the_thread, priority );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  the_node = _Scheduler_priority_affinity_SMP_Node_downcast( node );
  the_node->Affinity     = *_CPU_set_Default();
  the_node->Affinity.set = &the_node->Affinity.preallocated;
}
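
/*
 * Note (added commentary, not from the original sources): the node starts
 * out with the default CPU set returned by _CPU_set_Default(), which
 * normally contains every configured processor, so a newly created thread
 * is eligible to execute anywhere until the application narrows its
 * affinity.
 */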

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim is passed when the caller only needs the highest
   * priority ready node, for example when checking whether any
   * migrations are needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread. It ignores those which cannot be replaced by the filter
 * thread because the filter thread does not have affinity for the
 * processor on which the potential victim is executing.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If we didn't find a thread which is of equal or lower importance
     * than the filter thread, then we can't schedule the filter thread
     * to execute.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the index of the processor the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready thread can be scheduled
 * to execute. When we schedule with affinity, there is the possibility
 * that we need to migrate a thread to another core to ensure that the
 * highest priority ready threads are in fact scheduled.
 */
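/*
 * Illustrative scenario (added commentary, not from the original sources):
 * suppose CPU 0 executes a thread at priority 10, CPU 1 executes a thread
 * at priority 20, and a ready thread at priority 5 has affinity for CPU 1
 * only.  Get_highest_ready( context, NULL ) yields the priority 5 thread,
 * Get_lowest_scheduled() finds the priority 20 thread on CPU 1 as a victim
 * within that affinity set, and the loop in the function below displaces it
 * so the priority 5 thread migrates onto CPU 1.
 */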
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_priority_SMP_Context *self;
  Scheduler_Node                 *lowest_scheduled;
  Scheduler_Node                 *highest_ready;

  self = _Scheduler_priority_SMP_Get_self( context );

  while (1) {
    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
      /* Nothing to do */
      break;
    }

    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * FIXME: Do not consider threads using the scheduler helping protocol
     * since this could produce more than one thread in need for help in one
     * operation which is currently not possible.
     */
    if ( lowest_scheduled->help_state != SCHEDULER_HELP_YOURSELF )
      break;

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */

    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Insert_scheduled_fifo,
      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
      _Scheduler_SMP_Allocate_processor_exact
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *needs_help;

  needs_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 *  This is unique to this scheduler because it passes the scheduler specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Thread_Control        *needs_help,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static Thread_Control *
_Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes this scheduler's get_highest_ready() helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes this scheduler's get_highest_ready() helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}
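
/*
 * Note (added commentary, not from the original sources): the ask-for-help
 * path above uses the generic _Scheduler_SMP_Get_lowest_scheduled and
 * _Scheduler_SMP_Allocate_processor_lazy helpers rather than the
 * affinity-aware variants used elsewhere in this file, so help requests do
 * not appear to take the thread's processor affinity into account.
 */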

void _Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
    _Scheduler_priority_affinity_SMP_Do_ask_for_help
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

bool _Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_priority_affinity_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}

void _Scheduler_priority_affinity_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  node = _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  /*
   * If the old and new sets are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread, &node->Base.Base.Base );
  }

  CPU_COPY( node->Affinity.set, cpuset );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need for help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread, &node->Base.Base.Base );
  }

  return true;
}
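
/*
 * Usage sketch (added commentary, not from the original sources): an
 * application normally reaches the Get/Set affinity operations above
 * through the Classic API.  The task id and CPU number below are
 * illustrative only.
 *
 *   #include <rtems.h>
 *
 *   void restrict_task_to_cpu_1( rtems_id task_id )
 *   {
 *     cpu_set_t cpuset;
 *
 *     CPU_ZERO( &cpuset );
 *     CPU_SET( 1, &cpuset );
 *
 *     // Eventually invokes this scheduler's Set_affinity operation.
 *     (void) rtems_task_set_affinity( task_id, sizeof( cpuset ), &cpuset );
 *   }
 */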