Changeset 6c23252 in rtems


Timestamp:
Jun 16, 2021, 6:00:16 AM (7 weeks ago)
Author:
Richi Dubey <richidubey@…>
Branches:
master
Children:
3705962
Parents:
be96cb43
git-author:
Richi Dubey <richidubey@…> (06/16/21 06:00:16)
git-committer:
Sebastian Huber <sebastian.huber@…> (06/24/21 12:16:21)
Message:

Update Strong APA Scheduler

This change allows higher priority tasks to be migrated upon the
arrival of a lower priority task that is limited by affinity constraints.

Change license to BSD-2-Clause according to file history and
re-licensing agreement.

Update #3053.

Location:
cpukit
Files:
3 edited

  • cpukit/include/rtems/scheduler.h

    rbe96cb43 r6c23252  
    252252  #include <rtems/score/schedulerstrongapa.h>
    253253
     254  #ifndef CONFIGURE_MAXIMUM_PROCESSORS
     255    #error "CONFIGURE_MAXIMUM_PROCESSORS must be defined to configure the Strong APA scheduler"
     256  #endif
     257
    254258  #define SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) \
    255259    SCHEDULER_CONTEXT_NAME( strong_APA_ ## name )
     
    258262    static struct { \
    259263      Scheduler_strong_APA_Context Base; \
    260       Chain_Control                Ready[ ( prio_count ) ]; \
     264      Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; \
    261265    } SCHEDULER_STRONG_APA_CONTEXT_NAME( name )
    262266
     
    265269      &SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \
    266270      SCHEDULER_STRONG_APA_ENTRY_POINTS, \
    267       RTEMS_ARRAY_SIZE( \
    268         SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \
    269       ) - 1, \
     271      SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \
    270272      ( obj_name ) \
    271273      SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \
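
The new #error above means that an application selecting the Strong APA
scheduler must now define CONFIGURE_MAXIMUM_PROCESSORS, since it sizes the
per-processor array in the scheduler context. A minimal configuration sketch;
the CONFIGURE_* options other than the two shown in the hunk are standard
confdefs.h settings picked here only for illustration:

    /* Illustrative application configuration, not part of this changeset */
    #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
    #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

    #define CONFIGURE_MAXIMUM_PROCESSORS 4  /* sizes the Strong APA CPU array */
    #define CONFIGURE_SCHEDULER_STRONG_APA  /* selects this scheduler */

    #define CONFIGURE_MAXIMUM_TASKS 8
    #define CONFIGURE_RTEMS_INIT_TASKS_TABLE

    #define CONFIGURE_INIT
    #include <rtems/confdefs.h>
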
  • cpukit/include/rtems/score/schedulerstrongapa.h

    rbe96cb43 r6c23252  
     1/* SPDX-License-Identifier: BSD-2-Clause */
     2
    13/**
    24 * @file
     
    911
    1012/*
    11  * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
    12  *
    13  *  embedded brains GmbH
    14  *  Dornierstr. 4
    15  *  82178 Puchheim
    16  *  Germany
    17  *  <rtems@embedded-brains.de>
    18  *
    19  * The license and distribution terms for this file may be
    20  * found in the file LICENSE in this distribution or at
    21  * http://www.rtems.org/license/LICENSE.
     13 * Copyright (C) 2020 Richi Dubey
     14 * Copyright (C) 2013, 2018 embedded brains GmbH (http://www.embedded-brains.de)
     15 *
     16 * Redistribution and use in source and binary forms, with or without
     17 * modification, are permitted provided that the following conditions
     18 * are met:
     19 * 1. Redistributions of source code must retain the above copyright
     20 *    notice, this list of conditions and the following disclaimer.
     21 * 2. Redistributions in binary form must reproduce the above copyright
     22 *    notice, this list of conditions and the following disclaimer in the
     23 *    documentation and/or other materials provided with the distribution.
     24 *
     25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35 * POSSIBILITY OF SUCH DAMAGE.
    2236 */
    2337
     
    2640
    2741#include <rtems/score/scheduler.h>
    28 #include <rtems/score/schedulerpriority.h>
    2942#include <rtems/score/schedulersmp.h>
    3043
     
    3346#endif /* __cplusplus */
    3447
     48/* Forward Declaration of Per_CPU_Control */
     49struct Per_CPU_Control;
     50
    3551/**
    3652 * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler
     
    4056 * @brief This group contains the Strong APA Scheduler implementation.
    4157 *
    42  * This is an implementation of the global fixed priority scheduler (G-FP).  It
    43  * uses one ready chain per priority to ensure constant time insert operations.
    44  * The scheduled chain uses linear insert operations and has at most processor
    45  * count entries.  Since the processor and priority count are constants all
    46  * scheduler operations complete in a bounded execution time.
    47  *
    48  * The the_thread preempt mode will be ignored.
    49  *
     58 * This is an implementation of the Strong APA scheduler defined by
     59 * Cerqueira et al. in Linux's Processor Affinity API, Refined:
     60 * Shifting Real-Time Tasks Towards Higher Schedulability.
     61 *
      62 * The scheduled and ready nodes are accessed via
      63 * Scheduler_strong_APA_Context::Ready, which helps in backtracking when a
      64 * node that is executing on a CPU gets blocked. A new node is allocated to
      65 * a CPU by checking all the nodes executing on the processors in its
      66 * affinity set and, transitively, the nodes executing on the processors in
      67 * their affinity sets.
    5068 * @{
    5169 */
    5270
    5371/**
    54  * @brief Scheduler context specialization for Strong APA
    55  * schedulers.
     72 * @brief Scheduler node specialization for Strong APA schedulers.
    5673 */
    5774typedef struct {
    58   Scheduler_SMP_Context    Base;
    59   Priority_bit_map_Control Bit_map;
    60   Chain_Control            Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
     75  /**
     76   * @brief SMP scheduler node.
     77   */
     78  Scheduler_SMP_Node Base;
     79
     80  /**
     81   * @brief Chain node for Scheduler_strong_APA_Context::Ready.
     82   */
     83  Chain_Node Ready_node;
     84
     85  /**
     86   * @brief CPU that this node would preempt in the backtracking part of
     87   * _Scheduler_strong_APA_Get_highest_ready and
      88   * _Scheduler_strong_APA_Do_enqueue.
     89   */
     90  struct Per_CPU_Control *cpu_to_preempt;
     91
     92  /**
     93   * @brief The associated affinity set of this node.
     94   */
     95  Processor_mask Affinity;
     96} Scheduler_strong_APA_Node;
     97
     98
     99/**
      100 * @brief Per-CPU control and related variables used to implement the BFS.
     101 */
     102typedef struct {
     103  /**
     104   * @brief CPU in a queue.
     105   */
     106  struct Per_CPU_Control *cpu;
     107
     108  /**
     109   * @brief The node that would preempt this CPU.
     110   */
     111  Scheduler_Node *preempting_node;
     112
     113  /**
     114   * @brief Whether or not this cpu has been added to the queue
     115   * (visited in BFS).
     116   */
     117  bool visited;
     118
     119  /**
     120   * @brief The node currently executing on this cpu.
     121   */
     122  Scheduler_Node *executing;
     123} Scheduler_strong_APA_CPU;
     124
     125/**
      126 * @brief Scheduler context specialization for Strong APA schedulers.
     127 */
     128typedef struct {
     129  /**
     130   * @brief @see Scheduler_SMP_Context.
     131   */
     132  Scheduler_SMP_Context Base;
     133
     134  /**
     135   * @brief Chain of all the ready and scheduled nodes present in
     136   * the Strong APA scheduler.
     137   */
     138  Chain_Control Ready;
     139
     140  /**
     141   * @brief Stores cpu-specific variables.
     142   */
     143  Scheduler_strong_APA_CPU CPU[ RTEMS_ZERO_LENGTH_ARRAY ];
    61144} Scheduler_strong_APA_Context;
    62145
    63 /**
    64  * @brief Scheduler node specialization for Strong APA
    65  * schedulers.
    66  */
    67 typedef struct {
    68   /**
    69    * @brief SMP scheduler node.
    70    */
    71   Scheduler_SMP_Node Base;
    72 
    73   /**
    74    * @brief The associated ready queue of this node.
    75    */
    76   Scheduler_priority_Ready_queue Ready_queue;
    77 } Scheduler_strong_APA_Node;
     146#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255
    78147
    79148/**
     
    102171    _Scheduler_default_Cancel_job, \
    103172    _Scheduler_default_Tick, \
    104     _Scheduler_SMP_Start_idle \
    105     SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
     173    _Scheduler_strong_APA_Start_idle, \
     174    _Scheduler_strong_APA_Set_affinity \
    106175  }
    107176
     
    170239 * @brief Asks for help.
    171240 *
    172  * @param  scheduler The scheduler control instance.
     241 * @param scheduler The scheduler control instance.
    173242 * @param the_thread The thread that asks for help.
    174243 * @param node The node of @a the_thread.
     
    248317);
    249318
     319/**
     320 * @brief Starts an idle thread.
     321 *
     322 * @param scheduler The scheduler instance.
      323 * @param[in, out] idle An idle thread.
     324 * @param cpu The cpu for the operation.
     325 */
     326void _Scheduler_strong_APA_Start_idle(
     327  const Scheduler_Control *scheduler,
     328  Thread_Control          *idle,
     329  struct Per_CPU_Control  *cpu
     330);
     331
     332/**
      333 * @brief Sets the processor affinity of a node.
      334 *
      335 * @param scheduler The scheduler control instance.
      336 * @param thread The thread of @a node_base.
      337 * @param[in, out] node_base The node whose affinity is set to @a affinity.
     338 */
     339Status_Control _Scheduler_strong_APA_Set_affinity(
     340  const Scheduler_Control *scheduler,
     341  Thread_Control          *thread,
     342  Scheduler_Node          *node_base,
     343  const Processor_mask    *affinity
     344);
     345
    250346/** @} */
    251347
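
The new _Scheduler_strong_APA_Set_affinity() entry point above is what a
task's processor affinity request reaches when this scheduler owns the
processors. A hedged sketch of the corresponding application-level call
through the Classic API (the helper name and CPU numbers are made up for
illustration; cpu_set_t is assumed to be visible via <rtems.h>, otherwise
include <sys/cpuset.h>):

    #include <rtems.h>

    /* Hypothetical helper: restrict the calling task to CPUs 0 and 1.  The
     * request is routed through the SMP scheduler framework down to
     * _Scheduler_strong_APA_Set_affinity(). */
    static rtems_status_code pin_self_to_first_two_cpus( void )
    {
      cpu_set_t affinity;

      CPU_ZERO( &affinity );
      CPU_SET( 0, &affinity );
      CPU_SET( 1, &affinity );

      return rtems_task_set_affinity( RTEMS_SELF, sizeof( affinity ), &affinity );
    }
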
  • cpukit/score/src/schedulerstrongapa.c

    rbe96cb43 r6c23252  
     1/* SPDX-License-Identifier: BSD-2-Clause */
     2
    13/**
    24 * @file
     
    68 * @brief This source file contains the implementation of
    79 *   _Scheduler_strong_APA_Add_processor(),
     10 *   _Scheduler_strong_APA_Allocate_processor(),
    811 *   _Scheduler_strong_APA_Ask_for_help(), _Scheduler_strong_APA_Block(),
    9  *   _Scheduler_strong_APA_Initialize(),
     12 *   _Scheduler_strong_APA_Do_ask_for_help(),
     13 *   _Scheduler_strong_APA_Do_enqueue(),
     14 *   _Scheduler_strong_APA_Do_set_affinity(),
     15 *   _Scheduler_strong_APA_Do_update(), _Scheduler_strong_APA_Enqueue(),
     16 *   _Scheduler_strong_APA_Enqueue_scheduled(),
     17 *   _Scheduler_strong_APA_Extract_from_ready(),
     18 *   _Scheduler_strong_APA_Extract_from_scheduled(),
     19 *   _Scheduler_strong_APA_Find_highest_ready(),
     20 *   _Scheduler_strong_APA_Get_highest_ready(),
     21 *   _Scheduler_strong_APA_Get_lowest_reachable(),
     22 *   _Scheduler_strong_APA_Get_lowest_scheduled(),
     23 *   _Scheduler_strong_APA_Has_ready(),
     24 *   _Scheduler_strong_APA_Initialize(), _Scheduler_strong_APA_Insert_ready(),
     25 *   _Scheduler_strong_APA_Move_from_ready_to_scheduled(),
     26 *   _Scheduler_strong_APA_Move_from_scheduled_to_ready(),
    1027 *   _Scheduler_strong_APA_Node_initialize(),
    1128 *   _Scheduler_strong_APA_Reconsider_help_request(),
    12  *   _Scheduler_strong_APA_Remove_processor(), _Scheduler_strong_APA_Unblock(),
    13  *   _Scheduler_strong_APA_Update_priority(),
     29 *   _Scheduler_strong_APA_Register_idle(),
     30 *   _Scheduler_strong_APA_Remove_processor(),
     31 *   _Scheduler_strong_APA_Set_affinity(),
     32 *   _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(),
     33 *   _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(),
    1434 *   _Scheduler_strong_APA_Withdraw_node(), and _Scheduler_strong_APA_Yield().
    1535 */
    1636
    1737/*
    18  * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
     38 * Copyright (C) 2020 Richi Dubey
     39 * Copyright (C) 2013, 2016 embedded brains GmbH (http://www.embedded-brains.de)
    1940 *
    20  *  embedded brains GmbH
    21  *  Dornierstr. 4
    22  *  82178 Puchheim
    23  *  Germany
    24  *  <rtems@embedded-brains.de>
     41 * Redistribution and use in source and binary forms, with or without
     42 * modification, are permitted provided that the following conditions
     43 * are met:
     44 * 1. Redistributions of source code must retain the above copyright
     45 *    notice, this list of conditions and the following disclaimer.
     46 * 2. Redistributions in binary form must reproduce the above copyright
     47 *    notice, this list of conditions and the following disclaimer in the
     48 *    documentation and/or other materials provided with the distribution.
    2549 *
    26  * The license and distribution terms for this file may be
    27  * found in the file LICENSE in this distribution or at
    28  * http://www.rtems.org/license/LICENSE.
     50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     51 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     53 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     54 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     60 * POSSIBILITY OF SUCH DAMAGE.
    2961 */
    3062
     
    3466
    3567#include <rtems/score/schedulerstrongapa.h>
    36 #include <rtems/score/schedulerpriorityimpl.h>
    3768#include <rtems/score/schedulersmpimpl.h>
    38 
    39 static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self(
     69#include <rtems/score/assert.h>
     70
     71#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
     72  RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node )
     73
     74static inline Scheduler_strong_APA_Context *
     75_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
     76{
     77  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
     78}
     79
     80static inline Scheduler_strong_APA_Context *
     81_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
     82{
     83  return (Scheduler_strong_APA_Context *) context;
     84}
     85
     86static inline Scheduler_strong_APA_Node *
     87_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
     88{
     89  return (Scheduler_strong_APA_Node *) node;
     90}
     91
     92static inline void _Scheduler_strong_APA_Do_update(
     93  Scheduler_Context *context,
     94  Scheduler_Node    *node,
     95  Priority_Control   new_priority
     96)
     97{
     98  Scheduler_SMP_Node *smp_node;
     99  (void) context;
     100
     101  smp_node = _Scheduler_SMP_Node_downcast( node );
     102  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
     103}
     104
     105/*
     106 * Returns true if the Strong APA scheduler has ready nodes
     107 * available for scheduling.
     108 */
     109static inline bool _Scheduler_strong_APA_Has_ready(
    40110  Scheduler_Context *context
    41111)
    42112{
    43   return (Scheduler_strong_APA_Context *) context;
    44 }
    45 
    46 static Scheduler_strong_APA_Node *
    47 _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
    48 {
    49   return (Scheduler_strong_APA_Node *) node;
    50 }
    51 
    52 static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
    53   Scheduler_Context *context,
    54   Scheduler_Node    *scheduled_to_ready
    55 )
    56 {
    57   Scheduler_strong_APA_Context *self =
    58     _Scheduler_strong_APA_Get_self( context );
    59   Scheduler_strong_APA_Node *node =
    60     _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
    61 
    62   _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
    63   _Scheduler_priority_Ready_queue_enqueue_first(
    64     &node->Base.Base.Node.Chain,
    65     &node->Ready_queue,
    66     &self->Bit_map
    67   );
    68 }
    69 
    70 static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
     113  Scheduler_strong_APA_Context *self;
     114  const Chain_Node             *tail;
     115  Chain_Node                   *next;
     116  Scheduler_strong_APA_Node    *node;
     117
     118  self = _Scheduler_strong_APA_Get_self( context );
     119  tail = _Chain_Immutable_tail( &self->Ready );
     120  next = _Chain_First( &self->Ready );
     121
     122  while ( next != tail ) {
     123    node = (Scheduler_strong_APA_Node *)STRONG_SCHEDULER_NODE_OF_CHAIN( next );
     124
     125    if (
     126      _Scheduler_SMP_Node_state( &node->Base.Base ) ==
     127      SCHEDULER_SMP_NODE_READY
     128    ) {
     129      return true;
     130    }
     131
     132    next = _Chain_Next( next );
     133  }
     134
     135  return false;
     136}
     137
     138static inline void _Scheduler_strong_APA_Set_scheduled(
     139  Scheduler_strong_APA_Context *self,
     140  Scheduler_Node                *executing,
     141  const Per_CPU_Control         *cpu
     142)
     143{
     144  self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
     145}
     146
     147static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
     148  const Scheduler_strong_APA_Context *self,
     149  const Per_CPU_Control               *cpu
     150)
     151{
     152  return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
     153}
     154
     155static inline void _Scheduler_strong_APA_Allocate_processor(
     156  Scheduler_Context *context,
     157  Scheduler_Node    *scheduled_base,
     158  Scheduler_Node    *victim_base,
     159  Per_CPU_Control   *victim_cpu
     160)
     161{
     162  Scheduler_strong_APA_Node    *scheduled;
     163  Scheduler_strong_APA_Context *self;
     164
     165  (void) victim_base;
     166
     167  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
     168  self = _Scheduler_strong_APA_Get_self( context );
     169
     170  _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );
     171
     172  _Scheduler_SMP_Allocate_processor_exact(
     173    context,
     174    &( scheduled->Base.Base ),
     175    NULL,
     176    victim_cpu
     177  );
     178}
     179
     180/*
      181 * Finds and returns the highest priority ready node reachable from the
      182 * CPUs queued in self->CPU between the front and rear indices.
     183 */
     184static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready(
     185  Scheduler_strong_APA_Context *self,
     186  uint32_t                      front,
     187  uint32_t                      rear
     188)
     189{
     190  Scheduler_Node              *highest_ready;
     191  Scheduler_strong_APA_CPU    *CPU;
     192  const Chain_Node            *tail;
     193  Chain_Node                  *next;
     194  Scheduler_strong_APA_Node   *node;
     195  Priority_Control             min_priority_num;
     196  Priority_Control             curr_priority;
     197  Per_CPU_Control             *assigned_cpu;
     198  Scheduler_SMP_Node_state     curr_state;
     199  Per_CPU_Control             *curr_CPU;
     200
     201  CPU = self->CPU;
     202  /*
      203   * The first task accessed has nothing to compare its priority against,
      204   * so it is the highest priority task witnessed so far.
     205   */
     206  min_priority_num = UINT64_MAX;
     207
     208  while ( front <= rear ) {
     209    curr_CPU = CPU[ front++ ].cpu;
     210
     211    tail = _Chain_Immutable_tail( &self->Ready );
     212    next = _Chain_First( &self->Ready );
     213
     214    while ( next != tail ) {
     215      node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
     216      /*
     217       * Check if the curr_CPU is in the affinity set of the node.
     218       */
     219      if (
     220        _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
     221      ) {
     222        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
     223
     224        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
     225          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
     226
     227          if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
     228            CPU[ ++rear ].cpu = assigned_cpu;
     229            CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
     230            /*
      231             * curr_CPU, taken from the queue, caused this node to add the
      232             * CPU it is executing on to the queue. Hence this node might get
      233             * preempted because of curr_CPU, and curr_CPU is the CPU that
      234             * this node should in turn preempt in case this node gets
      235             * preempted.
     236             */
     237            node->cpu_to_preempt = curr_CPU;
     238          }
     239        } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
     240          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
     241          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
     242
     243          if (
     244            min_priority_num == UINT64_MAX ||
     245            curr_priority < min_priority_num
     246          ) {
     247            min_priority_num = curr_priority;
     248            highest_ready = &node->Base.Base;
     249            /*
     250             * In case curr_CPU is filter_CPU, we need to store the
      251             * cpu_to_preempt value so that we return to the calling SMP_*
      252             * function rather than preempting the node ourselves.
     253             */
     254            node->cpu_to_preempt = curr_CPU;
     255          }
     256        }
     257      }
      258      next = _Chain_Next( next );
     259    }
     260  }
     261
     262  return highest_ready;
     263}
     264
     265static inline void  _Scheduler_strong_APA_Move_from_ready_to_scheduled(
    71266  Scheduler_Context *context,
    72267  Scheduler_Node    *ready_to_scheduled
     268)
     269{
     270  Priority_Control insert_priority;
     271
     272  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
     273  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
     274  _Scheduler_SMP_Insert_scheduled(
     275    context,
     276    ready_to_scheduled,
     277    insert_priority
     278  );
     279}
     280
     281static inline void _Scheduler_strong_APA_Insert_ready(
     282  Scheduler_Context *context,
     283  Scheduler_Node    *node_base,
     284  Priority_Control   insert_priority
    73285)
    74286{
    75287  Scheduler_strong_APA_Context *self;
    76288  Scheduler_strong_APA_Node    *node;
    77   Priority_Control              insert_priority;
    78289
    79290  self = _Scheduler_strong_APA_Get_self( context );
    80   node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
    81 
    82   _Scheduler_priority_Ready_queue_extract(
    83     &node->Base.Base.Node.Chain,
    84     &node->Ready_queue,
    85     &self->Bit_map
    86   );
    87   insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
    88   insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    89   _Chain_Insert_ordered_unprotected(
    90     &self->Base.Scheduled,
    91     &node->Base.Base.Node.Chain,
    92     &insert_priority,
    93     _Scheduler_SMP_Priority_less_equal
    94   );
    95 }
    96 
    97 static void _Scheduler_strong_APA_Insert_ready(
    98   Scheduler_Context *context,
    99   Scheduler_Node    *node_base,
    100   Priority_Control   insert_priority
     291  node = _Scheduler_strong_APA_Node_downcast( node_base );
     292
      293  if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
      294    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
      295  } else {
     296    _Chain_Extract_unprotected( &node->Ready_node );
     297    _Chain_Set_off_chain( &node->Ready_node );
     298    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
     299  }
     300}
     301
     302static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
     303  Scheduler_Context *context,
     304  Scheduler_Node    *scheduled_to_ready
     305)
     306{
     307  Priority_Control insert_priority;
     308
      309  if ( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
     310    _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
     311  }
     312
     313  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
     314
     315  _Scheduler_strong_APA_Insert_ready(
     316    context,
     317    scheduled_to_ready,
     318    insert_priority
     319  );
     320}
     321
     322/*
      323 * Implements the BFS algorithm for task departure and returns the highest
      324 * ready Scheduler_Node for a particular CPU.
      325 * The Scheduler_Node filter points to the victim node whose blocking caused
      326 * this function to be called.
     327 */
     328static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
     329  Scheduler_Context *context,
     330  Scheduler_Node    *filter
     331)
     332{
     333  Scheduler_strong_APA_Context *self;
     334  Per_CPU_Control              *filter_cpu;
     335  Scheduler_strong_APA_Node    *node;
     336  Scheduler_Node               *highest_ready;
     337  Scheduler_Node               *curr_node;
     338  Scheduler_Node               *next_node;
     339  Scheduler_strong_APA_CPU     *CPU;
     340  uint32_t                      front;
     341  uint32_t                      rear;
     342  uint32_t                      cpu_max;
     343  uint32_t                      cpu_index;
     344
     345  self = _Scheduler_strong_APA_Get_self( context );
     346  /*
     347   * Denotes front and rear of the queue
     348   */
     349  front = 0;
     350  rear = -1;
     351
     352  filter_cpu = _Thread_Get_CPU( filter->user );
     353  CPU = self->CPU;
     354  cpu_max = _SMP_Get_processor_maximum();
     355
     356  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
     357    CPU[ cpu_index ].visited = false;
     358  }
     359
     360  CPU[ ++rear ].cpu = filter_cpu;
     361  CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
     362
     363  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
     364                    self,
     365                    front,
     366                    rear
     367                  );
     368
     369  if ( highest_ready != filter ) {
     370    /*
     371     * Backtrack on the path from
     372     * filter_cpu to highest_ready, shifting along every task.
     373     */
     374
     375    node = _Scheduler_strong_APA_Node_downcast( highest_ready );
     376    /*
      377     * The highest ready node might not be directly reachable from the
      378     * victim CPU, so task shifting may be needed.
     379     */
     380    while ( node->cpu_to_preempt != filter_cpu ) {
     381      curr_node = &node->Base.Base;
     382      next_node = _Scheduler_strong_APA_Get_scheduled(
     383                    self,
     384                    node->cpu_to_preempt
     385                  );
     386
     387      (void) _Scheduler_SMP_Preempt(
     388               context,
     389               curr_node,
     390               next_node,
     391               _Scheduler_strong_APA_Allocate_processor
     392             );
     393
     394      if ( curr_node == highest_ready ) {
     395        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
     396      }
     397
     398      node = _Scheduler_strong_APA_Node_downcast( next_node );
     399    }
     400    /*
      401     * Save the last node so that the calling SMP_* function
      402     * can do the allocation.
      403     */
      404    curr_node = &node->Base.Base;
      405    highest_ready = curr_node;
      406    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, curr_node );
      407  }
     408
     409  return highest_ready;
     410}
     411
     412/*
      413 * Returns the lowest priority scheduled node that is directly reachable.
     414 */
     415static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
     416  Scheduler_Context *context,
     417  Scheduler_Node    *filter_base
     418)
     419{
     420  uint32_t                      cpu_max;
     421  uint32_t                      cpu_index;
     422  Scheduler_Node               *curr_node;
     423  Scheduler_Node               *lowest_scheduled = NULL;
     424  Priority_Control              max_priority_num;
     425  Priority_Control              curr_priority;
     426  Scheduler_strong_APA_Node    *filter_strong_node;
     427  Scheduler_strong_APA_Context *self;
     428
     429  self = _Scheduler_strong_APA_Get_self( context );
     430  max_priority_num = 0;    /* Max (Lowest) priority encountered so far */
     431  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );
     432
     433  /* lowest_scheduled is NULL if affinity of a node is 0 */
     434  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
     435  cpu_max = _SMP_Get_processor_maximum();
     436
     437  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
     438    /* Checks if the CPU is in the affinity set of filter_strong_node */
     439    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
     440      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
     441
     442      if ( _Per_CPU_Is_processor_online( cpu ) ) {
     443        curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
     444        curr_priority = _Scheduler_Node_get_priority( curr_node );
     445        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
     446
     447        if ( curr_priority > max_priority_num ) {
     448          lowest_scheduled = curr_node;
     449          max_priority_num = curr_priority;
     450        }
     451      }
     452    }
     453  }
     454
     455  _Assert( lowest_scheduled != NULL );
     456  return lowest_scheduled;
     457}
     458
     459static inline void _Scheduler_strong_APA_Extract_from_scheduled(
     460  Scheduler_Context *context,
     461  Scheduler_Node    *node_to_extract
    101462)
    102463{
     
    105466
    106467  self = _Scheduler_strong_APA_Get_self( context );
    107   node = _Scheduler_strong_APA_Node_downcast( node_base );
    108 
    109   if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
    110     _Scheduler_priority_Ready_queue_enqueue(
    111       &node->Base.Base.Node.Chain,
    112       &node->Ready_queue,
    113       &self->Bit_map
     468  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
     469
     470  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
      471  /* Not removing it from Ready since the node could go into the READY state */
     472}
     473
     474static inline void _Scheduler_strong_APA_Extract_from_ready(
     475  Scheduler_Context *context,
     476  Scheduler_Node    *node_to_extract
     477)
     478{
     479  Scheduler_strong_APA_Node    *node;
     480
     481  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
     482
      483  if ( !_Chain_Is_node_off_chain( &node->Ready_node ) ) {
     484    _Chain_Extract_unprotected( &node->Ready_node );
     485    _Chain_Set_off_chain( &node->Ready_node );
     486  }
     487
     488}
     489
     490static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable(
     491  Scheduler_strong_APA_Context *self,
     492  uint32_t                      front,
     493  uint32_t                      rear,
     494  Per_CPU_Control             **cpu_to_preempt
     495)
     496{
     497  Scheduler_Node              *lowest_reachable;
     498  Priority_Control             max_priority_num;
     499  uint32_t                     cpu_max;
     500  uint32_t                     cpu_index;
     501  Thread_Control              *curr_thread;
     502  Per_CPU_Control             *curr_CPU;
     503  Priority_Control             curr_priority;
     504  Scheduler_Node              *curr_node;
     505  Scheduler_strong_APA_Node   *curr_strong_node;
     506  Scheduler_strong_APA_CPU    *CPU;
     507
     508  /* Max (Lowest) priority encountered so far */
     509  max_priority_num = 0;
     510  CPU = self->CPU;
     511  cpu_max = _SMP_Get_processor_maximum();
     512
     513  while ( front <= rear ) {
     514    curr_CPU = CPU[ front ].cpu;
     515    front = front + 1;
     516
     517    curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
     518    curr_thread = curr_node->user;
     519
     520    curr_priority = _Scheduler_Node_get_priority( curr_node );
     521    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
     522
     523    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
     524
     525    if ( curr_priority > max_priority_num ) {
     526      lowest_reachable = curr_node;
     527      max_priority_num = curr_priority;
     528      *cpu_to_preempt = curr_CPU;
     529    }
     530
     531    if ( !curr_thread->is_idle ) {
     532      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
     533        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
      534          /* Checks if this CPU is in the affinity set of the node */
     535          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
     536          if (
     537            _Per_CPU_Is_processor_online( cpu ) &&
     538            CPU[ cpu_index ].visited == false )
     539          {
     540            rear = rear + 1;
     541            CPU[ rear ].cpu = cpu;
     542            CPU[ cpu_index ].visited = true;
     543            CPU[ cpu_index ].preempting_node = curr_node;
     544          }
     545        }
     546      }
     547    }
     548  }
     549
     550  return lowest_reachable;
     551}
     552
     553static inline bool _Scheduler_strong_APA_Do_enqueue(
     554  Scheduler_Context *context,
     555  Scheduler_Node    *lowest_reachable,
     556  Scheduler_Node    *node,
     557  Priority_Control  insert_priority,
     558  Per_CPU_Control  *cpu_to_preempt
     559)
     560{
     561  bool                          needs_help;
     562  Priority_Control              node_priority;
     563  Priority_Control              lowest_priority;
     564  Scheduler_strong_APA_CPU     *CPU;
     565  Scheduler_Node               *curr_node;
     566  /* The first node that gets removed from the cpu */
     567  Scheduler_Node               *first_node;
     568  Scheduler_strong_APA_Node    *curr_strong_node;
     569  Per_CPU_Control              *curr_CPU;
     570  Scheduler_strong_APA_Context *self;
     571  Scheduler_Node               *next_node;
     572
     573
     574  self = _Scheduler_strong_APA_Get_self( context );
     575  CPU = self->CPU;
     576
     577  node_priority = _Scheduler_Node_get_priority( node );
     578  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
     579
      580  if ( lowest_reachable == NULL ) {
     581    /*
     582     * This means the affinity set of the newly arrived node
     583     * is empty.
     584     */
     585    lowest_priority = UINT64_MAX;
     586  } else {
     587    lowest_priority =  _Scheduler_Node_get_priority( lowest_reachable );
     588    lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
     589  }
     590
     591  if ( lowest_priority > node_priority ) {
     592    /*
     593     * Backtrack on the path from
     594     * _Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting
     595     * along every task
     596     */
     597
     598    curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
     599    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
     600    curr_strong_node->cpu_to_preempt = cpu_to_preempt;
     601
     602    /* Save which cpu to preempt in cpu_to_preempt value of the node */
     603    while ( curr_node != node ) {
     604      curr_CPU = _Thread_Get_CPU( curr_node->user );
     605      curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
     606      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
      607      curr_strong_node->cpu_to_preempt = curr_CPU;
      608    }
     609
     610    curr_CPU = curr_strong_node->cpu_to_preempt;
     611    next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
     612
     613    node_priority = _Scheduler_Node_get_priority( curr_node );
     614    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
     615
     616    _Scheduler_SMP_Enqueue_to_scheduled(
     617      context,
     618      curr_node,
     619      node_priority,
     620      next_node,
     621      _Scheduler_SMP_Insert_scheduled,
     622      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
     623      _Scheduler_strong_APA_Allocate_processor
    114624    );
     625
     626    curr_node = next_node;
     627    first_node = curr_node;
     628    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
     629
     630    while ( curr_node != lowest_reachable ) {
     631      curr_CPU = curr_strong_node->cpu_to_preempt;
     632      next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
     633      /* curr_node preempts the next_node; */
     634      _Scheduler_SMP_Preempt(
     635        context,
     636        curr_node,
     637        next_node,
     638        _Scheduler_strong_APA_Allocate_processor
     639      );
     640
      641      if ( curr_node == first_node ) {
      642        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, first_node );
     643      }
     644      curr_node = next_node;
     645      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
     646    }
     647
     648    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );
     649
     650    needs_help = false;
    115651  } else {
    116     _Scheduler_priority_Ready_queue_enqueue_first(
    117       &node->Base.Base.Node.Chain,
    118       &node->Ready_queue,
    119       &self->Bit_map
    120     );
    121   }
    122 }
    123 
    124 static void _Scheduler_strong_APA_Extract_from_ready(
    125   Scheduler_Context *context,
    126   Scheduler_Node    *the_thread
    127 )
    128 {
    129   Scheduler_strong_APA_Context *self =
    130     _Scheduler_strong_APA_Get_self( context );
    131   Scheduler_strong_APA_Node *node =
    132     _Scheduler_strong_APA_Node_downcast( the_thread );
    133 
    134   _Scheduler_priority_Ready_queue_extract(
    135     &node->Base.Base.Node.Chain,
    136     &node->Ready_queue,
    137     &self->Bit_map
    138   );
    139 }
    140 
    141 static void _Scheduler_strong_APA_Do_update(
    142   Scheduler_Context *context,
    143   Scheduler_Node *node_to_update,
    144   Priority_Control new_priority
    145 )
    146 {
    147   Scheduler_strong_APA_Context *self =
    148     _Scheduler_strong_APA_Get_self( context );
    149   Scheduler_strong_APA_Node *node =
    150     _Scheduler_strong_APA_Node_downcast( node_to_update );
    151 
    152   _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
    153   _Scheduler_priority_Ready_queue_update(
    154     &node->Ready_queue,
    155     SCHEDULER_PRIORITY_UNMAP( new_priority ),
    156     &self->Bit_map,
    157     &self->Ready[ 0 ]
    158   );
    159 }
    160 
    161 static Scheduler_strong_APA_Context *
    162 _Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
    163 {
    164   return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
    165 }
    166 
    167 void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
    168 {
    169   Scheduler_strong_APA_Context *self =
    170     _Scheduler_strong_APA_Get_context( scheduler );
    171 
    172   _Scheduler_SMP_Initialize( &self->Base );
    173   _Priority_bit_map_Initialize( &self->Bit_map );
    174   _Scheduler_priority_Ready_queue_initialize(
    175     &self->Ready[ 0 ],
    176     scheduler->maximum_priority
    177   );
    178 }
    179 
    180 void _Scheduler_strong_APA_Node_initialize(
    181   const Scheduler_Control *scheduler,
    182   Scheduler_Node          *node,
    183   Thread_Control          *the_thread,
    184   Priority_Control         priority
    185 )
    186 {
    187   Scheduler_Context            *context;
    188   Scheduler_strong_APA_Context *self;
    189   Scheduler_strong_APA_Node    *the_node;
    190 
    191   the_node = _Scheduler_strong_APA_Node_downcast( node );
    192   _Scheduler_SMP_Node_initialize(
    193     scheduler,
    194     &the_node->Base,
    195     the_thread,
    196     priority
    197   );
    198 
    199   context = _Scheduler_Get_context( scheduler );
    200   self = _Scheduler_strong_APA_Get_self( context );
    201   _Scheduler_priority_Ready_queue_update(
    202     &the_node->Ready_queue,
    203     SCHEDULER_PRIORITY_UNMAP( priority ),
    204     &self->Bit_map,
    205     &self->Ready[ 0 ]
    206   );
    207 }
    208 
    209 static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
    210 {
    211   Scheduler_strong_APA_Context *self =
    212     _Scheduler_strong_APA_Get_self( context );
    213 
    214   return !_Priority_bit_map_Is_empty( &self->Bit_map );
    215 }
    216 
    217 static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
    218   Scheduler_Context *context,
    219   Scheduler_Node    *node
    220 )
    221 {
    222   Scheduler_strong_APA_Context *self =
    223     _Scheduler_strong_APA_Get_self( context );
    224 
    225   (void) node;
    226 
    227   return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
    228     &self->Bit_map,
    229     &self->Ready[ 0 ]
    230   );
    231 }
    232 
    233 void _Scheduler_strong_APA_Block(
    234   const Scheduler_Control *scheduler,
    235   Thread_Control          *the_thread,
    236   Scheduler_Node          *node
    237 )
    238 {
    239   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
    240 
    241   _Scheduler_SMP_Block(
    242     context,
    243     the_thread,
    244     node,
    245     _Scheduler_SMP_Extract_from_scheduled,
    246     _Scheduler_strong_APA_Extract_from_ready,
    247     _Scheduler_strong_APA_Get_highest_ready,
    248     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    249     _Scheduler_SMP_Allocate_processor_exact
    250   );
    251 }
    252 
    253 static bool _Scheduler_strong_APA_Enqueue(
     652    needs_help = true;
     653  }
     654
     655  /* Add it to Ready chain since it is now either scheduled or just ready. */
      656  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );
     657
     658  return needs_help;
     659}
     660
     661/*
      662 * BFS algorithm for task arrival.
      663 * Enqueues the node either in the scheduled chain or in the ready chain.
     664 * node is the newly arrived node and is currently not scheduled.
     665 */
     666static inline bool _Scheduler_strong_APA_Enqueue(
    254667  Scheduler_Context *context,
    255668  Scheduler_Node    *node,
     
    257670)
    258671{
    259   return _Scheduler_SMP_Enqueue(
    260     context,
    261     node,
    262     insert_priority,
    263     _Scheduler_SMP_Priority_less_equal,
    264     _Scheduler_strong_APA_Insert_ready,
    265     _Scheduler_SMP_Insert_scheduled,
    266     _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    267     _Scheduler_SMP_Get_lowest_scheduled,
    268     _Scheduler_SMP_Allocate_processor_exact
    269   );
    270 }
    271 
    272 static bool _Scheduler_strong_APA_Enqueue_scheduled(
     672  Scheduler_strong_APA_Context *self;
     673  Scheduler_strong_APA_CPU     *CPU;
     674  uint32_t                      cpu_max;
     675  uint32_t                      cpu_index;
     676  Per_CPU_Control              *cpu_to_preempt;
     677  Scheduler_Node               *lowest_reachable;
     678  Scheduler_strong_APA_Node    *strong_node;
     679
     680  /* Denotes front and rear of the queue */
     681  uint32_t      front;
     682  uint32_t      rear;
     683
     684  front = 0;
     685  rear = -1;
     686
     687  self = _Scheduler_strong_APA_Get_self( context );
     688  strong_node = _Scheduler_strong_APA_Node_downcast( node );
     689  cpu_max = _SMP_Get_processor_maximum();
     690  CPU = self->CPU;
     691
     692  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
     693    CPU[ cpu_index ].visited = false;
     694
      695    /* Checks if this CPU is in the affinity set of the node */
     696    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
     697      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
     698
     699      if ( _Per_CPU_Is_processor_online( cpu ) ) {
     700        rear = rear + 1;
     701        CPU[ rear ].cpu = cpu;
     702        CPU[ cpu_index ].visited = true;
     703        CPU[ cpu_index ].preempting_node = node;
     704      }
     705    }
     706  }
     707
     708  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
     709                       self,
     710                       front,
     711                       rear,
     712                       &cpu_to_preempt
     713                     );
     714
     715  return _Scheduler_strong_APA_Do_enqueue(
     716           context,
     717           lowest_reachable,
     718           node,
     719           insert_priority,
     720           cpu_to_preempt
     721         );
     722}
     723
     724static inline bool _Scheduler_strong_APA_Enqueue_scheduled(
    273725  Scheduler_Context *context,
    274726  Scheduler_Node    *node,
    275   Priority_Control  insert_priority
     727  Priority_Control   insert_priority
    276728)
    277729{
     
    286738    _Scheduler_SMP_Insert_scheduled,
    287739    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    288     _Scheduler_SMP_Allocate_processor_exact
    289   );
    290 }
    291 
    292 void _Scheduler_strong_APA_Unblock(
    293   const Scheduler_Control *scheduler,
    294   Thread_Control          *the_thread,
    295   Scheduler_Node          *node
    296 )
    297 {
    298   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
    299 
    300   _Scheduler_SMP_Unblock(
    301     context,
    302     the_thread,
    303     node,
    304     _Scheduler_strong_APA_Do_update,
    305     _Scheduler_strong_APA_Enqueue
    306   );
    307 }
    308 
    309 static bool _Scheduler_strong_APA_Do_ask_for_help(
     740    _Scheduler_strong_APA_Allocate_processor
     741  );
     742}
     743
     744static inline bool _Scheduler_strong_APA_Do_ask_for_help(
    310745  Scheduler_Context *context,
    311746  Thread_Control    *the_thread,
     
    321756    _Scheduler_SMP_Insert_scheduled,
    322757    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    323     _Scheduler_SMP_Get_lowest_scheduled,
    324     _Scheduler_SMP_Allocate_processor_lazy
     758    _Scheduler_strong_APA_Get_lowest_scheduled,
     759    _Scheduler_strong_APA_Allocate_processor
     760  );
     761}
     762
      763static inline void _Scheduler_strong_APA_Do_set_affinity(
     764  Scheduler_Context *context,
     765  Scheduler_Node    *node_base,
     766  void              *arg
     767)
     768{
     769  Scheduler_strong_APA_Node *node;
     770
     771  node = _Scheduler_strong_APA_Node_downcast( node_base );
     772  node->Affinity = *( (const Processor_mask *) arg );
     773}
     774
     775void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
     776{
     777  Scheduler_strong_APA_Context *self =
     778      _Scheduler_strong_APA_Get_context( scheduler );
     779
     780  _Scheduler_SMP_Initialize( &self->Base );
     781  _Chain_Initialize_empty( &self->Ready );
     782}
     783
     784void _Scheduler_strong_APA_Yield(
     785  const Scheduler_Control *scheduler,
     786  Thread_Control          *thread,
     787  Scheduler_Node          *node
     788)
     789{
     790  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     791
     792  _Scheduler_SMP_Yield(
     793    context,
     794    thread,
     795    node,
     796    _Scheduler_strong_APA_Extract_from_ready,
     797    _Scheduler_strong_APA_Enqueue,
     798    _Scheduler_strong_APA_Enqueue_scheduled
     799  );
     800}
     801
     802void _Scheduler_strong_APA_Block(
     803  const Scheduler_Control *scheduler,
     804  Thread_Control          *thread,
     805  Scheduler_Node          *node
     806)
     807{
     808  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     809
     810  /*
      811   * Needed in case the node is a scheduled node: _Scheduler_SMP_Block()
      812   * extracts from the SMP scheduled chain and extracts from the Strong APA
      813   * Ready chain only when the node is ready. But the Strong APA Ready chain
      814   * stores both ready and scheduled nodes.
      815   */
      816  _Scheduler_strong_APA_Extract_from_ready( context, node );
     817
     818  _Scheduler_SMP_Block(
     819    context,
     820    thread,
     821    node,
     822    _Scheduler_strong_APA_Extract_from_scheduled,
     823    _Scheduler_strong_APA_Extract_from_ready,
     824    _Scheduler_strong_APA_Get_highest_ready,
     825    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
     826    _Scheduler_strong_APA_Allocate_processor
     827  );
     828}
     829
     830void _Scheduler_strong_APA_Unblock(
     831  const Scheduler_Control *scheduler,
     832  Thread_Control          *thread,
     833  Scheduler_Node          *node
     834)
     835{
     836  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     837
     838  _Scheduler_SMP_Unblock(
     839    context,
     840    thread,
     841    node,
     842    _Scheduler_strong_APA_Do_update,
     843    _Scheduler_strong_APA_Enqueue
    325844  );
    326845}
     
    328847void _Scheduler_strong_APA_Update_priority(
    329848  const Scheduler_Control *scheduler,
    330   Thread_Control          *the_thread,
     849  Thread_Control          *thread,
    331850  Scheduler_Node          *node
    332851)
     
    336855  _Scheduler_SMP_Update_priority(
    337856    context,
    338     the_thread,
     857    thread,
    339858    node,
    340859    _Scheduler_strong_APA_Extract_from_ready,
     
    354873  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
    355874
    356   return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
     875  return _Scheduler_strong_APA_Do_ask_for_help(
     876    context,
     877    the_thread,
     878    node
     879  );
    357880}
    358881
     
    390913    _Scheduler_strong_APA_Get_highest_ready,
    391914    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    392     _Scheduler_SMP_Allocate_processor_lazy
    393   );
     915    _Scheduler_strong_APA_Allocate_processor
     916  );
     917}
     918
     919static inline void _Scheduler_strong_APA_Register_idle(
     920  Scheduler_Context *context,
     921  Scheduler_Node    *idle_base,
     922  Per_CPU_Control   *cpu
     923)
     924{
     925  Scheduler_strong_APA_Context *self;
     926  self = _Scheduler_strong_APA_Get_self( context );
     927
     928  _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
    394929}
    395930
     
    406941    _Scheduler_strong_APA_Has_ready,
    407942    _Scheduler_strong_APA_Enqueue_scheduled,
    408     _Scheduler_SMP_Do_nothing_register_idle
     943    _Scheduler_strong_APA_Register_idle
     944  );
     945}
     946
     947void _Scheduler_strong_APA_Start_idle(
     948  const Scheduler_Control *scheduler,
     949  Thread_Control          *idle,
     950  Per_CPU_Control         *cpu
     951)
     952{
     953  Scheduler_Context *context;
     954
     955  context = _Scheduler_Get_context( scheduler );
     956
     957  _Scheduler_SMP_Do_start_idle(
     958    context,
     959    idle,
     960    cpu,
     961    _Scheduler_strong_APA_Register_idle
    409962  );
    410963}
     
    425978}
    426979
    427 void _Scheduler_strong_APA_Yield(
    428   const Scheduler_Control *scheduler,
     980void _Scheduler_strong_APA_Node_initialize(
     981  const Scheduler_Control *scheduler,
     982  Scheduler_Node          *node,
    429983  Thread_Control          *the_thread,
    430   Scheduler_Node          *node
    431 )
    432 {
    433   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
    434 
    435   _Scheduler_SMP_Yield(
    436     context,
    437     the_thread,
    438     node,
    439     _Scheduler_strong_APA_Extract_from_ready,
    440     _Scheduler_strong_APA_Enqueue,
    441     _Scheduler_strong_APA_Enqueue_scheduled
    442   );
    443 }
     984  Priority_Control         priority
     985)
     986{
     987  Scheduler_SMP_Node *smp_node;
     988  Scheduler_strong_APA_Node *strong_node;
     989
     990  smp_node = _Scheduler_SMP_Node_downcast( node );
     991  strong_node = _Scheduler_strong_APA_Node_downcast( node );
     992
     993  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
     994
     995  _Processor_mask_Assign(
     996    &strong_node->Affinity,
     997   _SMP_Get_online_processors()
     998  );
     999}
     1000
     1001Status_Control _Scheduler_strong_APA_Set_affinity(
     1002  const Scheduler_Control *scheduler,
     1003  Thread_Control          *thread,
     1004  Scheduler_Node          *node_base,
     1005  const Processor_mask    *affinity
     1006)
     1007{
     1008  Scheduler_Context         *context;
     1009  Scheduler_strong_APA_Node *node;
     1010  Processor_mask             local_affinity;
     1011
     1012  context = _Scheduler_Get_context( scheduler );
     1013  _Processor_mask_And( &local_affinity, &context->Processors, affinity );
     1014
     1015  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
     1016    return STATUS_INVALID_NUMBER;
     1017  }
     1018
     1019  node = _Scheduler_strong_APA_Node_downcast( node_base );
     1020
     1021  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
      1022    return STATUS_SUCCESSFUL;   /* Nothing to do */
     1023
     1024 _Processor_mask_Assign( &node->Affinity, &local_affinity );
     1025
     1026 _Scheduler_SMP_Set_affinity(
     1027   context,
     1028   thread,
     1029   node_base,
     1030   &local_affinity,
     1031   _Scheduler_strong_APA_Do_set_affinity,
     1032   _Scheduler_strong_APA_Extract_from_ready,
     1033   _Scheduler_strong_APA_Get_highest_ready,
     1034   _Scheduler_strong_APA_Move_from_ready_to_scheduled,
     1035   _Scheduler_strong_APA_Enqueue,
     1036   _Scheduler_strong_APA_Allocate_processor
     1037 );
     1038
     1039  return STATUS_SUCCESSFUL;
     1040}
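
For orientation, here is a self-contained model of the breadth-first search
performed by the enqueue path above. This is not RTEMS code: tasks and
affinities are reduced to plain arrays and bit masks, and only the search for
the lowest priority reachable CPU (the role of
_Scheduler_strong_APA_Get_lowest_reachable()) is sketched.

    #include <stdbool.h>
    #include <stdint.h>

    #define MODEL_CPU_COUNT 4

    /* Simplified model of the task executing on each CPU: a numeric priority
     * (lower value is more important, as in RTEMS) and an affinity bit mask. */
    typedef struct {
      uint64_t priority;
      uint32_t affinity;   /* bit i set: the task may execute on CPU i */
    } model_task;

    /* Breadth-first search over CPUs, starting from the arriving task's
     * affinity set and following the affinity sets of the tasks met on the
     * way, to find the CPU running the lowest priority (numerically largest)
     * reachable task. */
    static int model_lowest_reachable(
      const model_task executing[ MODEL_CPU_COUNT ],
      uint32_t         arriving_affinity
    )
    {
      int      queue[ MODEL_CPU_COUNT ];
      bool     visited[ MODEL_CPU_COUNT ] = { false };
      int      front = 0;
      int      rear = -1;
      int      lowest_cpu = -1;
      uint64_t lowest_priority = 0;

      for ( int cpu = 0; cpu < MODEL_CPU_COUNT; ++cpu ) {
        if ( ( arriving_affinity & ( 1u << cpu ) ) != 0 ) {
          queue[ ++rear ] = cpu;
          visited[ cpu ] = true;
        }
      }

      while ( front <= rear ) {
        int cpu = queue[ front++ ];

        if ( executing[ cpu ].priority > lowest_priority ) {
          lowest_priority = executing[ cpu ].priority;
          lowest_cpu = cpu;
        }

        /* The task on this CPU may migrate to any CPU in its own affinity
         * set, so those CPUs become reachable for the arriving task as well. */
        for ( int next = 0; next < MODEL_CPU_COUNT; ++next ) {
          if ( ( executing[ cpu ].affinity & ( 1u << next ) ) != 0 && !visited[ next ] ) {
            visited[ next ] = true;
            queue[ ++rear ] = next;
          }
        }
      }

      return lowest_cpu;   /* -1 if no CPU is reachable */
    }

If the lowest reachable priority is numerically greater than the priority of
the arriving node, the real _Scheduler_strong_APA_Do_enqueue() then walks back
along the recorded preempting_node and cpu_to_preempt links and shifts each
task one CPU along that path, which is exactly the migration of higher
priority tasks described in the commit message.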