Changeset c597fb1 in rtems


Timestamp:
Nov 9, 2017, 3:21:37 PM
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
master
Children:
c0d602e
Parents:
5018894e
git-author:
Sebastian Huber <sebastian.huber@…> (11/09/17 15:21:37)
git-committer:
Sebastian Huber <sebastian.huber@…> (11/20/17 07:36:49)
Message:

score: Optimize scheduler priority updates

Thread priority changes may append or prepend the thread to its priority
group on the scheduler ready queue. Previously, a separate priority
value and a prepend-it flag in the scheduler node were used to propagate
a priority change to the scheduler.

Now, use an append-it bit in the priority control and reduce the plain
priority value to 63 bits.

This change leads to a significant code size reduction (about 25%) of
the SMP schedulers. The negligible increase of the standard priority
scheduler is due to some additional shift operations
(SCHEDULER_PRIORITY_MAP() and SCHEDULER_PRIORITY_UNMAP()).
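
As a hedged illustration of the new encoding, the following stand-alone C sketch uses only the macros introduced by this changeset (see schedulerimpl.h and schedulernodeimpl.h below); the concrete priority value is made up:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t Priority_Control;

  /* Copied from this changeset so that the sketch compiles stand-alone. */
  #define SCHEDULER_PRIORITY_APPEND_FLAG 1
  #define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )
  #define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )
  #define SCHEDULER_PRIORITY_PURIFY( priority )  \
    ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )
  #define SCHEDULER_PRIORITY_APPEND( priority )  \
    ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )
  #define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
    ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )

  int main( void )
  {
    /* A made-up user visible priority of 5 maps to the scheduler internal
     * value 10: the plain value lives in bits 1..63, bit 0 is the append
     * indicator (initially cleared, i.e. prepend). */
    Priority_Control internal = SCHEDULER_PRIORITY_MAP( 5 );

    /* Request "append to the priority group" by setting bit 0. */
    Priority_Control insert = SCHEDULER_PRIORITY_APPEND( internal );

    printf( "append: %d\n", SCHEDULER_PRIORITY_IS_APPEND( insert ) ); /* 1 */
    printf(
      "user:   %" PRIu64 "\n",
      SCHEDULER_PRIORITY_UNMAP( SCHEDULER_PRIORITY_PURIFY( insert ) )
    ); /* 5 */
    return 0;
  }

Because the append/prepend decision now travels inside the priority value itself, the duplicated *_fifo/*_lifo enqueue and insert paths of the SMP schedulers collapse into single operations in the diffs below, which is where the code size reduction comes from.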

Before:

   text    filename
    136    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleblock.o
    464    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimplechangepriority.o
     24    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimple.o
    108    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleschedule.o
    292    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleunblock.o
    264    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleyield.o

   text    filename
    280    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityblock.o
    488    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerprioritychangepriority.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriority.o
    164    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityschedule.o
    328    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityunblock.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityyield.o

   text    filename
  24112    arm-rtems5/c/imx7/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  37204    sparc-rtems5/c/gr740/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  42236    powerpc-rtems5/c/qoriq_e6500_32/cpukit/score/src/libscore_a-scheduleredfsmp.o

After:

   text    filename
    136    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleblock.o
    272    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimplechangepriority.o
     24    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimple.o
    108    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleschedule.o
    292    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleunblock.o
    264    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleyield.o

   text    filename
    280    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityblock.o
    488    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerprioritychangepriority.o
    208    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriority.o
    164    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityschedule.o
    332    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityunblock.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityyield.o

   text    filename
  18860    arm-rtems5/c/imx7/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  28520    sparc-rtems5/c/gr740/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  32664    powerpc-rtems5/c/qoriq_e6500_32/cpukit/score/src/libscore_a-scheduleredfsmp.o

Files:
28 edited

  • cpukit/score/include/rtems/score/priority.h

    r5018894e rc597fb1  
    99 *  On-Line Applications Research Corporation (OAR).
    1010 *
    11  *  Copyright (c) 2016 embedded brains GmbH.
     11 *  Copyright (c) 2016, 2017 embedded brains GmbH.
    1212 *
    1313 *  The license and distribution terms for this file may be
     
    4646
    4747/**
    48  * @brief A plain thread priority value.
     48 * @brief The thread priority control.
    4949 *
    5050 * Lower values represent higher priorities.  So, a priority value of zero
    5151 * represents the highest priority thread.  This value is reserved for internal
    5252 * threads and the priority ceiling protocol.
     53 *
     54 * The format of the thread priority control depends on the context.  A thread
     55 * priority control may contain a user visible priority for API import/export.
     56 * It may also contain a scheduler internal priority value.  Values are
     57 * translated via the scheduler map/unmap priority operations.  The format of
     58 * scheduler internal values depends on the particular scheduler implementation.
     59 * It may for example encode a deadline in case of the EDF scheduler.
     60 *
     61 * The thread priority control value contained in the scheduler node
     62 * (Scheduler_Node::Priority::value) uses the least-significant bit to indicate
     63 * if the thread should be appended or prepended to its priority group, see
     64 * SCHEDULER_PRIORITY_APPEND().
    5365 */
    5466typedef uint64_t Priority_Control;
  • cpukit/score/include/rtems/score/scheduler.h

    r5018894e rc597fb1  
    335335
    336336/**
    337  * @brief Returns the thread priority.
    338  *
    339  * @param[in] scheduler Unused.
    340  * @param[in] priority The thread priority.
    341  *
    342  * @return priority The thread priority.
     337 * @brief Returns the scheduler internal thread priority mapped by
     338 * SCHEDULER_PRIORITY_MAP().
     339 *
     340 * @param[in] scheduler Unused.
     341 * @param[in] priority The user visible thread priority.
     342 *
     343 * @return priority The scheduler internal thread priority.
    343344 */
    344345Priority_Control _Scheduler_default_Map_priority(
     
    347348);
    348349
    349 #define _Scheduler_default_Unmap_priority _Scheduler_default_Map_priority
     350/**
     351 * @brief Returns the user visible thread priority unmapped by
     352 * SCHEDULER_PRIORITY_UNMAP().
     353 *
     354 * @param[in] scheduler Unused.
     355 * @param[in] priority The scheduler internal thread priority.
     356 *
     357 * @return priority The user visible thread priority.
     358 */
     359Priority_Control _Scheduler_default_Unmap_priority(
     360  const Scheduler_Control *scheduler,
     361  Priority_Control         priority
     362);
    350363
    351364#if defined(RTEMS_SMP)
  • cpukit/score/include/rtems/score/scheduleredfimpl.h

    r5018894e rc597fb1  
    8080}
    8181
    82 RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less_or_equal(
     82RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Priority_less_equal(
    8383  const void        *left,
    8484  const RBTree_Node *right
     
    102102  Scheduler_EDF_Context *context,
    103103  Scheduler_EDF_Node    *node,
    104   Priority_Control       priority
     104  Priority_Control       insert_priority
    105105)
    106106{
     
    108108    &context->Ready,
    109109    &node->Node,
    110     &priority,
    111     _Scheduler_EDF_Less
    112   );
    113 }
    114 
    115 RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue_first(
    116   Scheduler_EDF_Context *context,
    117   Scheduler_EDF_Node    *node,
    118   Priority_Control       priority
    119 )
    120 {
    121   _RBTree_Insert_inline(
    122     &context->Ready,
    123     &node->Node,
    124     &priority,
    125     _Scheduler_EDF_Less_or_equal
     110    &insert_priority,
     111    _Scheduler_EDF_Priority_less_equal
    126112  );
    127113}
  • cpukit/score/include/rtems/score/schedulerimpl.h

    r5018894e rc597fb1  
    3636 */
    3737/**@{**/
     38
     39/**
     40 * @brief Maps a priority value to support the append indicator.
     41 */
     42#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )
     43
     44/**
     45 * @brief Returns the plain priority value.
     46 */
     47#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )
     48
     49/**
     50 * @brief Clears the priority append indicator bit.
     51 */
     52#define SCHEDULER_PRIORITY_PURIFY( priority )  \
     53  ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )
     54
     55/**
     56 * @brief Returns the priority control with the append indicator bit set.
     57 */
     58#define SCHEDULER_PRIORITY_APPEND( priority )  \
     59  ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )
     60
     61/**
     62 * @brief Returns true, if the item should be appended to its priority group,
     63 * otherwise returns false and the item should be prepended to its priority
     64 * group.
     65 */
     66#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
     67  ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )
    3868
    3969/**
  • cpukit/score/include/rtems/score/schedulernode.h

    r5018894e rc597fb1  
    176176     * The producer of this value is _Thread_Change_priority().  The consumer
    177177     * is the scheduler via the unblock and update priority operations.
     178     *
     179     * This priority control consists of two parts.  One part is the plain
     180     * priority value (most-significant 63 bits).  The other part is the
     181     * least-significant bit which indicates if the thread should be appended
     182     * (bit set) or prepended (bit cleared) to its priority group, see
     183     * SCHEDULER_PRIORITY_APPEND().
    178184     */
    179185    Priority_Control value;
     
    185191    SMP_sequence_lock_Control Lock;
    186192#endif
    187 
    188     /**
    189      * @brief In case a priority update is necessary and this is true, then
    190      * enqueue the thread as the first of its priority group, otherwise enqueue
    191      * the thread as the last of its priority group.
    192      */
    193     bool prepend_it;
    194193  } Priority;
    195194};
  • cpukit/score/include/rtems/score/schedulernodeimpl.h

    r5018894e rc597fb1  
    11/*
    2  * Copyright (c) 2014, 2016 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2014, 2017 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    3131  RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority )
    3232
     33/**
     34 * @brief Priority append indicator for the priority control used for the
     35 * scheduler node priority.
     36 */
     37#define SCHEDULER_PRIORITY_APPEND_FLAG 1
     38
    3339RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
    3440  const struct _Scheduler_Control *scheduler,
     
    4147
    4248  node->Priority.value = priority;
    43   node->Priority.prepend_it = false;
    4449
    4550#if defined(RTEMS_SMP)
     
    7075
    7176RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
    72   Scheduler_Node *node,
    73   bool           *prepend_it_p
     77  Scheduler_Node *node
    7478)
    7579{
    7680  Priority_Control priority;
    77   bool             prepend_it;
    7881
    7982#if defined(RTEMS_SMP)
     
    8588
    8689    priority = node->Priority.value;
    87     prepend_it = node->Priority.prepend_it;
    8890
    8991#if defined(RTEMS_SMP)
    9092  } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
    9193#endif
    92 
    93   *prepend_it_p = prepend_it;
    9494
    9595  return priority;
     
    108108#endif
    109109
     110  new_priority |= ( prepend_it ? 0 : SCHEDULER_PRIORITY_APPEND_FLAG );
    110111  node->Priority.value = new_priority;
    111   node->Priority.prepend_it = prepend_it;
    112112
    113113#if defined(RTEMS_SMP)
  • cpukit/score/include/rtems/score/schedulerpriorityimpl.h

    r5018894e rc597fb1  
    217217RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_update(
    218218  Scheduler_priority_Ready_queue *ready_queue,
    219   Priority_Control                new_priority,
     219  unsigned int                    new_priority,
    220220  Priority_bit_map_Control       *bit_map,
    221221  Chain_Control                  *ready_queues
    222222)
    223223{
    224   ready_queue->current_priority = (unsigned int) new_priority;
     224  ready_queue->current_priority = new_priority;
    225225  ready_queue->ready_chain = &ready_queues[ new_priority ];
    226226
     
    228228    bit_map,
    229229    &ready_queue->Priority_map,
    230     (unsigned int) new_priority
     230    new_priority
    231231  );
    232232}
  • cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h

    r5018894e rc597fb1  
    88
    99/*
    10  * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    9191  Scheduler_priority_SMP_Context *self;
    9292  Scheduler_priority_SMP_Node    *node;
    93   Priority_Control                priority;
     93  Priority_Control                insert_priority;
    9494
    9595  self = _Scheduler_priority_SMP_Get_self( context );
     
    101101    &self->Bit_map
    102102  );
    103   priority = node->Base.priority;
     103  insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
     104  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    104105  _Chain_Insert_ordered_unprotected(
    105106    &self->Base.Scheduled,
    106107    &node->Base.Base.Node.Chain,
    107     &priority,
    108     _Scheduler_SMP_Insert_priority_fifo_order
     108    &insert_priority,
     109    _Scheduler_SMP_Priority_less_equal
    109110  );
    110111}
    111112
    112 static inline void _Scheduler_priority_SMP_Insert_ready_lifo(
     113static inline void _Scheduler_priority_SMP_Insert_ready(
    113114  Scheduler_Context *context,
    114   Scheduler_Node    *thread
     115  Scheduler_Node    *node_base,
     116  Priority_Control   insert_priority
    115117)
    116118{
    117   Scheduler_priority_SMP_Context *self =
    118     _Scheduler_priority_SMP_Get_self( context );
    119   Scheduler_priority_SMP_Node *node =
    120     _Scheduler_priority_SMP_Node_downcast( thread );
     119  Scheduler_priority_SMP_Context *self;
     120  Scheduler_priority_SMP_Node    *node;
    121121
    122   _Scheduler_priority_Ready_queue_enqueue(
    123     &node->Base.Base.Node.Chain,
    124     &node->Ready_queue,
    125     &self->Bit_map
    126   );
    127 }
     122  self = _Scheduler_priority_SMP_Get_self( context );
     123  node = _Scheduler_priority_SMP_Node_downcast( node_base );
    128124
    129 static inline void _Scheduler_priority_SMP_Insert_ready_fifo(
    130   Scheduler_Context *context,
    131   Scheduler_Node    *thread
    132 )
    133 {
    134   Scheduler_priority_SMP_Context *self =
    135     _Scheduler_priority_SMP_Get_self( context );
    136   Scheduler_priority_SMP_Node *node =
    137     _Scheduler_priority_SMP_Node_downcast( thread );
    138 
    139   _Scheduler_priority_Ready_queue_enqueue_first(
    140     &node->Base.Base.Node.Chain,
    141     &node->Ready_queue,
    142     &self->Bit_map
    143   );
     125  if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
     126    _Scheduler_priority_Ready_queue_enqueue(
     127      &node->Base.Base.Node.Chain,
     128      &node->Ready_queue,
     129      &self->Bit_map
     130    );
     131  } else {
     132    _Scheduler_priority_Ready_queue_enqueue_first(
     133      &node->Base.Base.Node.Chain,
     134      &node->Ready_queue,
     135      &self->Bit_map
     136    );
     137  }
    144138}
    145139
     
    163157static inline void _Scheduler_priority_SMP_Do_update(
    164158  Scheduler_Context *context,
    165   Scheduler_Node *node_to_update,
    166   Priority_Control new_priority
     159  Scheduler_Node    *node_to_update,
     160  Priority_Control   new_priority
    167161)
    168162{
    169   Scheduler_priority_SMP_Context *self =
    170     _Scheduler_priority_SMP_Get_self( context );
    171   Scheduler_priority_SMP_Node *node =
    172     _Scheduler_priority_SMP_Node_downcast( node_to_update );
     163  Scheduler_priority_SMP_Context *self;
     164  Scheduler_priority_SMP_Node    *node;
     165
     166  self = _Scheduler_priority_SMP_Get_self( context );
     167  node = _Scheduler_priority_SMP_Node_downcast( node_to_update );
    173168
    174169  _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
    175170  _Scheduler_priority_Ready_queue_update(
    176171    &node->Ready_queue,
    177     new_priority,
     172    SCHEDULER_PRIORITY_UNMAP( new_priority ),
    178173    &self->Bit_map,
    179174    &self->Ready[ 0 ]
  • cpukit/score/include/rtems/score/schedulersimpleimpl.h

    r5018894e rc597fb1  
    3939}
    4040
    41 RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Insert_priority_lifo_order(
     41RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Priority_less_equal(
    4242  const void       *to_insert,
    4343  const Chain_Node *next
    4444)
    4545{
    46   const Priority_Control *priority_to_insert;
    47   const Thread_Control   *thread_next;
     46  const unsigned int  *priority_to_insert;
     47  const Thread_Control *thread_next;
    4848
    49   priority_to_insert = (const Priority_Control *) to_insert;
     49  priority_to_insert = (const unsigned int *) to_insert;
    5050  thread_next = (const Thread_Control *) next;
    5151
     
    5353}
    5454
    55 RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Insert_priority_fifo_order(
    56   const void       *to_insert,
    57   const Chain_Node *next
     55RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert(
     56  Chain_Control  *chain,
     57  Thread_Control *to_insert,
     58  unsigned int    insert_priority
    5859)
    5960{
    60   const Priority_Control *priority_to_insert;
    61   const Thread_Control   *thread_next;
    62 
    63   priority_to_insert = (const Priority_Control *) to_insert;
    64   thread_next = (const Thread_Control *) next;
    65 
    66   return *priority_to_insert < _Thread_Get_priority( thread_next );
    67 }
    68 
    69 RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert_priority_lifo(
    70   Chain_Control  *chain,
    71   Thread_Control *to_insert
    72 )
    73 {
    74   Priority_Control priority_to_insert;
    75 
    76   priority_to_insert = _Thread_Get_priority( to_insert );
    77 
    7861  _Chain_Insert_ordered_unprotected(
    7962    chain,
    8063    &to_insert->Object.Node,
    81     &priority_to_insert,
    82     _Scheduler_simple_Insert_priority_lifo_order
    83   );
    84 }
    85 
    86 RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert_priority_fifo(
    87   Chain_Control  *chain,
    88   Thread_Control *to_insert
    89 )
    90 {
    91   Priority_Control priority_to_insert;
    92 
    93   priority_to_insert = _Thread_Get_priority( to_insert );
    94 
    95   _Chain_Insert_ordered_unprotected(
    96     chain,
    97     &to_insert->Object.Node,
    98     &priority_to_insert,
    99     _Scheduler_simple_Insert_priority_fifo_order
     64    &insert_priority,
     65    _Scheduler_simple_Priority_less_equal
    10066  );
    10167}
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    r5018894e rc597fb1  
    4343 *
    4444 * State transitions are triggered via basic operations
    45  * - _Scheduler_SMP_Enqueue_ordered(),
    46  * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
     45 * - _Scheduler_SMP_Enqueue(),
     46 * - _Scheduler_SMP_Enqueue_scheduled(), and
    4747 * - _Scheduler_SMP_Block().
    4848 *
     
    297297typedef void ( *Scheduler_SMP_Insert )(
    298298  Scheduler_Context *context,
    299   Scheduler_Node    *node_to_insert
     299  Scheduler_Node    *node_to_insert,
     300  Priority_Control   insert_priority
    300301);
    301302
     
    325326typedef bool ( *Scheduler_SMP_Enqueue )(
    326327  Scheduler_Context *context,
    327   Scheduler_Node    *node_to_enqueue
     328  Scheduler_Node    *node_to_enqueue,
     329  Priority_Control   priority
    328330);
    329331
     
    352354}
    353355
    354 static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
     356static inline bool _Scheduler_SMP_Priority_less_equal(
    355357  const void       *to_insert,
    356358  const Chain_Node *next
     
    364366
    365367  return *priority_to_insert <= node_next->priority;
    366 }
    367 
    368 static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
    369   const void       *to_insert,
    370   const Chain_Node *next
    371 )
    372 {
    373   const Priority_Control   *priority_to_insert;
    374   const Scheduler_SMP_Node *node_next;
    375 
    376   priority_to_insert = (const Priority_Control *) to_insert;
    377   node_next = (const Scheduler_SMP_Node *) next;
    378 
    379   return *priority_to_insert < node_next->priority;
    380368}
    381369
     
    638626  Scheduler_Context                *context,
    639627  Scheduler_Node                   *node,
     628  Priority_Control                  priority,
    640629  Scheduler_Node                   *lowest_scheduled,
    641630  Scheduler_SMP_Insert              insert_scheduled,
     
    661650    );
    662651
    663     ( *insert_scheduled )( context, node );
     652    ( *insert_scheduled )( context, node, priority );
    664653    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    665654
     
    676665    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
    677666
    678     ( *insert_scheduled )( context, node );
     667    ( *insert_scheduled )( context, node, priority );
    679668    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    680669
     
    697686 * @param[in] context The scheduler instance context.
    698687 * @param[in] node The node to enqueue.
     688 * @param[in] priority The node insert priority.
    699689 * @param[in] order The order function.
    700690 * @param[in] insert_ready Function to insert a node into the set of ready
     
    711701 *   based on the rules of the scheduler.
    712702 */
    713 static inline bool _Scheduler_SMP_Enqueue_ordered(
     703static inline bool _Scheduler_SMP_Enqueue(
    714704  Scheduler_Context                  *context,
    715705  Scheduler_Node                     *node,
     706  Priority_Control                    insert_priority,
    716707  Chain_Node_order                    order,
    717708  Scheduler_SMP_Insert                insert_ready,
     
    722713)
    723714{
    724   bool              needs_help;
    725   Scheduler_Node   *lowest_scheduled;
    726   Priority_Control  node_priority;
     715  bool            needs_help;
     716  Scheduler_Node *lowest_scheduled;
    727717
    728718  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
    729   node_priority = _Scheduler_SMP_Node_priority( node );
    730 
    731   if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
     719
     720  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    732721    _Scheduler_SMP_Enqueue_to_scheduled(
    733722      context,
    734723      node,
     724      insert_priority,
    735725      lowest_scheduled,
    736726      insert_scheduled,
     
    740730    needs_help = false;
    741731  } else {
    742     ( *insert_ready )( context, node );
     732    ( *insert_ready )( context, node, insert_priority );
    743733    needs_help = true;
    744734  }
     
    766756 *   based on the rules of the scheduler.
    767757 */
    768 static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
     758static inline bool _Scheduler_SMP_Enqueue_scheduled(
    769759  Scheduler_Context                *context,
    770   Scheduler_Node                   *node,
     760  Scheduler_Node                   *const node,
     761  Priority_Control                  insert_priority,
    771762  Chain_Node_order                  order,
    772763  Scheduler_SMP_Extract             extract_from_ready,
     
    781772    Scheduler_Node                   *highest_ready;
    782773    Scheduler_Try_to_schedule_action  action;
    783     Priority_Control                  node_priority;
    784774
    785775    highest_ready = ( *get_highest_ready )( context, node );
    786     node_priority = _Scheduler_SMP_Node_priority( node );
    787776
    788777    /*
     
    792781    if (
    793782      node->sticky_level > 0
    794         && ( *order )( &node_priority, &highest_ready->Node.Chain )
     783        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    795784    ) {
    796       ( *insert_scheduled )( context, node );
     785      ( *insert_scheduled )( context, node, insert_priority );
    797786
    798787      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
     
    840829      );
    841830
    842       ( *insert_ready )( context, node );
     831      ( *insert_ready )( context, node, insert_priority );
    843832      ( *move_from_ready_to_scheduled )( context, highest_ready );
    844833
     
    856845      );
    857846
    858       ( *insert_ready )( context, node );
     847      ( *insert_ready )( context, node, insert_priority );
    859848      ( *move_from_ready_to_scheduled )( context, highest_ready );
    860849
     
    10341023  Scheduler_Node        *node,
    10351024  Scheduler_SMP_Update   update,
    1036   Scheduler_SMP_Enqueue  enqueue_fifo
     1025  Scheduler_SMP_Enqueue  enqueue
    10371026)
    10381027{
     
    10501039
    10511040  if ( unblock ) {
    1052     Priority_Control new_priority;
    1053     bool             prepend_it;
     1041    Priority_Control priority;
    10541042    bool             needs_help;
    10551043
    1056     new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
    1057     (void) prepend_it;
    1058 
    1059     if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
    1060       ( *update )( context, node, new_priority );
     1044    priority = _Scheduler_Node_get_priority( node );
     1045    priority = SCHEDULER_PRIORITY_PURIFY( priority );
     1046
     1047    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
     1048      ( *update )( context, node, priority );
    10611049    }
    10621050
    10631051    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
     1052      Priority_Control insert_priority;
     1053
    10641054      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    1065 
    1066       needs_help = ( *enqueue_fifo )( context, node );
     1055      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
     1056      needs_help = ( *enqueue )( context, node, insert_priority );
    10671057    } else {
    10681058      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
     
    10841074  Scheduler_SMP_Extract       extract_from_ready,
    10851075  Scheduler_SMP_Update        update,
    1086   Scheduler_SMP_Enqueue       enqueue_fifo,
    1087   Scheduler_SMP_Enqueue       enqueue_lifo,
    1088   Scheduler_SMP_Enqueue       enqueue_scheduled_fifo,
    1089   Scheduler_SMP_Enqueue       enqueue_scheduled_lifo,
     1076  Scheduler_SMP_Enqueue       enqueue,
     1077  Scheduler_SMP_Enqueue       enqueue_scheduled,
    10901078  Scheduler_SMP_Ask_for_help  ask_for_help
    10911079)
    10921080{
    1093   Priority_Control         new_priority;
    1094   bool                     prepend_it;
     1081  Priority_Control         priority;
     1082  Priority_Control         insert_priority;
    10951083  Scheduler_SMP_Node_state node_state;
    10961084
    1097   new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
    1098 
    1099   if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
     1085  insert_priority = _Scheduler_Node_get_priority( node );
     1086  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
     1087
     1088  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    11001089    if ( _Thread_Is_ready( thread ) ) {
    11011090      ( *ask_for_help )( context, thread, node );
     
    11091098  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    11101099    _Scheduler_SMP_Extract_from_scheduled( node );
    1111 
    1112     ( *update )( context, node, new_priority );
    1113 
    1114     if ( prepend_it ) {
    1115       ( *enqueue_scheduled_lifo )( context, node );
    1116     } else {
    1117       ( *enqueue_scheduled_fifo )( context, node );
    1118     }
     1100    ( *update )( context, node, priority );
     1101    ( *enqueue_scheduled )( context, node, insert_priority );
    11191102  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    11201103    ( *extract_from_ready )( context, node );
    1121 
    1122     ( *update )( context, node, new_priority );
    1123 
    1124     if ( prepend_it ) {
    1125       ( *enqueue_lifo )( context, node );
    1126     } else {
    1127       ( *enqueue_fifo )( context, node );
    1128     }
     1104    ( *update )( context, node, priority );
     1105    ( *enqueue )( context, node, insert_priority );
    11291106  } else {
    1130     ( *update )( context, node, new_priority );
     1107    ( *update )( context, node, priority );
    11311108
    11321109    if ( _Thread_Is_ready( thread ) ) {
     
    11411118  Scheduler_Node        *node,
    11421119  Scheduler_SMP_Extract  extract_from_ready,
    1143   Scheduler_SMP_Enqueue  enqueue_fifo,
    1144   Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
     1120  Scheduler_SMP_Enqueue  enqueue,
     1121  Scheduler_SMP_Enqueue  enqueue_scheduled
    11451122)
    11461123{
    11471124  bool                     needs_help;
    11481125  Scheduler_SMP_Node_state node_state;
     1126  Priority_Control         insert_priority;
    11491127
    11501128  node_state = _Scheduler_SMP_Node_state( node );
     1129  insert_priority = _Scheduler_SMP_Node_priority( node );
     1130  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    11511131
    11521132  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    11531133    _Scheduler_SMP_Extract_from_scheduled( node );
    1154     ( *enqueue_scheduled_fifo )( context, node );
     1134    ( *enqueue_scheduled )( context, node, insert_priority );
    11551135    needs_help = false;
    11561136  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    11571137    ( *extract_from_ready )( context, node );
    11581138
    1159     needs_help = ( *enqueue_fifo )( context, node );
     1139    needs_help = ( *enqueue )( context, node, insert_priority );
    11601140  } else {
    11611141    needs_help = true;
     
    11671147}
    11681148
    1169 static inline void _Scheduler_SMP_Insert_scheduled_lifo(
    1170   Scheduler_Context *context,
    1171   Scheduler_Node    *node_to_insert
     1149static inline void _Scheduler_SMP_Insert_scheduled(
     1150  Scheduler_Context *context,
     1151  Scheduler_Node    *node_to_insert,
     1152  Priority_Control   priority_to_insert
    11721153)
    11731154{
    11741155  Scheduler_SMP_Context *self;
    1175   Priority_Control       priority_to_insert;
    11761156
    11771157  self = _Scheduler_SMP_Get_self( context );
    1178   priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
    11791158
    11801159  _Chain_Insert_ordered_unprotected(
     
    11821161    &node_to_insert->Node.Chain,
    11831162    &priority_to_insert,
    1184     _Scheduler_SMP_Insert_priority_lifo_order
    1185   );
    1186 }
    1187 
    1188 static inline void _Scheduler_SMP_Insert_scheduled_fifo(
    1189   Scheduler_Context *context,
    1190   Scheduler_Node    *node_to_insert
    1191 )
    1192 {
    1193   Scheduler_SMP_Context *self;
    1194   Priority_Control       priority_to_insert;
    1195 
    1196   self = _Scheduler_SMP_Get_self( context );
    1197   priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
    1198 
    1199   _Chain_Insert_ordered_unprotected(
    1200     &self->Scheduled,
    1201     &node_to_insert->Node.Chain,
    1202     &priority_to_insert,
    1203     _Scheduler_SMP_Insert_priority_fifo_order
     1163    _Scheduler_SMP_Priority_less_equal
    12041164  );
    12051165}
     
    12311191
    12321192    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    1233       Priority_Control node_priority;
    1234 
    1235       node_priority = _Scheduler_SMP_Node_priority( node );
    1236 
    1237       if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
     1193      Priority_Control insert_priority;
     1194
     1195      insert_priority = _Scheduler_SMP_Node_priority( node );
     1196
     1197      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    12381198        _Thread_Scheduler_cancel_need_for_help(
    12391199          thread,
     
    12501210        );
    12511211
    1252         ( *insert_scheduled )( context, node );
     1212        ( *insert_scheduled )( context, node, insert_priority );
    12531213        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    12541214
     
    12621222        _Thread_Scheduler_release_critical( thread, &lock_context );
    12631223        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    1264         ( *insert_ready )( context, node );
     1224        ( *insert_ready )( context, node, insert_priority );
    12651225        success = false;
    12661226      }
     
    13851345  Thread_Control              *idle,
    13861346  Scheduler_SMP_Has_ready      has_ready,
    1387   Scheduler_SMP_Enqueue        enqueue_scheduled_fifo,
     1347  Scheduler_SMP_Enqueue        enqueue_scheduled,
    13881348  Scheduler_SMP_Register_idle  register_idle
    13891349)
     
    14001360
    14011361  if ( ( *has_ready )( &self->Base ) ) {
    1402     ( *enqueue_scheduled_fifo )( &self->Base, node );
     1362    Priority_Control insert_priority;
     1363
     1364    insert_priority = _Scheduler_SMP_Node_priority( node );
     1365    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
     1366    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
    14031367  } else {
    14041368    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
     
    14101374  Per_CPU_Control       *cpu,
    14111375  Scheduler_SMP_Extract  extract_from_ready,
    1412   Scheduler_SMP_Enqueue  enqueue_fifo
     1376  Scheduler_SMP_Enqueue  enqueue
    14131377)
    14141378{
     
    14521416
    14531417    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
    1454       ( *enqueue_fifo )( context, victim_node );
     1418      Priority_Control insert_priority;
     1419
     1420      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
     1421      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
     1422      ( *enqueue )( context, victim_node, insert_priority );
    14551423    }
    14561424  } else {
     
    14731441  Scheduler_SMP_Get_highest_ready  get_highest_ready,
    14741442  Scheduler_SMP_Move               move_from_ready_to_scheduled,
    1475   Scheduler_SMP_Enqueue            enqueue_fifo,
     1443  Scheduler_SMP_Enqueue            enqueue,
    14761444  Scheduler_SMP_Allocate_processor allocate_processor
    14771445)
    14781446{
    14791447  Scheduler_SMP_Node_state node_state;
     1448  Priority_Control         insert_priority;
    14801449
    14811450  node_state = _Scheduler_SMP_Node_state( node );
     1451  insert_priority = _Scheduler_SMP_Node_priority( node );
     1452  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    14821453
    14831454  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
     
    14931464    );
    14941465    ( *set_affinity )( context, node, arg );
    1495     ( *enqueue_fifo )( context, node );
     1466    ( *enqueue )( context, node, insert_priority );
    14961467  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    14971468    ( *extract_from_ready )( context, node );
    14981469    ( *set_affinity )( context, node, arg );
    1499     ( *enqueue_fifo )( context, node );
     1470    ( *enqueue )( context, node, insert_priority );
    15001471  } else {
    15011472    ( *set_affinity )( context, node, arg );
  • cpukit/score/src/schedulercbsunblock.c

    r5018894e rc597fb1  
    3535  Scheduler_CBS_Server *serv_info;
    3636  Priority_Control      priority;
    37   bool                  prepend_it;
    3837
    3938  the_node = _Scheduler_CBS_Node_downcast( node );
    4039  serv_info = the_node->cbs_server;
    41   priority = _Scheduler_Node_get_priority( &the_node->Base.Base, &prepend_it );
    42   (void) prepend_it;
     40  priority = _Scheduler_Node_get_priority( &the_node->Base.Base );
     41  priority = SCHEDULER_PRIORITY_PURIFY( priority );
    4342
    4443  /*
  • cpukit/score/src/schedulerdefaultmappriority.c

    r5018894e rc597fb1  
    11/*
    2  * Copyright (c) 2016 embedded brains GmbH
     2 * Copyright (c) 2016, 2017 embedded brains GmbH
    33 *
    44 * The license and distribution terms for this file may be
     
    1111#endif
    1212
    13 #include <rtems/score/scheduler.h>
     13#include <rtems/score/schedulerimpl.h>
    1414
    1515Priority_Control _Scheduler_default_Map_priority(
     
    1818)
    1919{
    20   return priority;
     20  return SCHEDULER_PRIORITY_MAP( priority );
    2121}
     22
     23Priority_Control _Scheduler_default_Unmap_priority(
     24  const Scheduler_Control *scheduler,
     25  Priority_Control         priority
     26)
     27{
     28  return SCHEDULER_PRIORITY_UNMAP( priority );
     29}
  • cpukit/score/src/scheduleredfchangepriority.c

    r5018894e rc597fb1  
    3030  Scheduler_EDF_Node    *the_node;
    3131  Priority_Control       priority;
    32   bool                   prepend_it;
     32  Priority_Control       insert_priority;
    3333
    3434  if ( !_Thread_Is_ready( the_thread ) ) {
     
    3838
    3939  the_node = _Scheduler_EDF_Node_downcast( node );
    40   priority = _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
     40  insert_priority = _Scheduler_Node_get_priority( &the_node->Base );
     41  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
    4142
    4243  if ( priority == the_node->priority ) {
     
    4950
    5051  _Scheduler_EDF_Extract( context, the_node );
    51 
    52   if ( prepend_it ) {
    53     _Scheduler_EDF_Enqueue_first( context, the_node, priority );
    54   } else {
    55     _Scheduler_EDF_Enqueue( context, the_node, priority );
    56   }
    57 
     52  _Scheduler_EDF_Enqueue( context, the_node, insert_priority );
    5853  _Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
    5954}
  • cpukit/score/src/scheduleredfreleasejob.c

    r5018894e rc597fb1  
    2626)
    2727{
    28   return SCHEDULER_EDF_PRIO_MSB | priority;
     28  return SCHEDULER_EDF_PRIO_MSB | SCHEDULER_PRIORITY_MAP( priority );
    2929}
    3030
     
    3434)
    3535{
    36   return priority & ~SCHEDULER_EDF_PRIO_MSB;
     36  return SCHEDULER_PRIORITY_UNMAP( priority & ~SCHEDULER_EDF_PRIO_MSB );
    3737}
    3838
     
    4949  _Thread_Wait_acquire_critical( the_thread, queue_context );
    5050
    51   _Priority_Node_set_priority( priority_node, deadline );
     51  /*
     52   * There is no integer overflow problem here due to the
     53   * SCHEDULER_PRIORITY_MAP().  The deadline is in clock ticks.  With the
     54   * minimum clock tick interval of 1us, the uptime is limited to about 146235
     55   * years.
     56   */
     57  _Priority_Node_set_priority(
     58    priority_node,
     59    SCHEDULER_PRIORITY_MAP( deadline )
     60  );
    5261
    5362  if ( _Priority_Node_is_active( priority_node ) ) {
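
The "about 146235 years" figure in the new comment can be reproduced with a short sketch. The assumption (based on the macro name, not shown in this changeset) is that SCHEDULER_EDF_PRIO_MSB is bit 63, so after SCHEDULER_PRIORITY_MAP() the deadline occupies the 62 bits between the append bit and the MSB:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main( void )
  {
    /* 62 usable deadline bits (bit 0 is the append indicator, bit 63 is
     * assumed to be SCHEDULER_EDF_PRIO_MSB). */
    uint64_t max_deadline_ticks = UINT64_C( 1 ) << 62;

    /* Minimum clock tick interval of 1us and 365-day years. */
    uint64_t ticks_per_year = UINT64_C( 1000000 ) * 60 * 60 * 24 * 365;

    /* Prints "146235 years". */
    printf( "%" PRIu64 " years\n", max_deadline_ticks / ticks_per_year );
    return 0;
  }
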
  • cpukit/score/src/scheduleredfsmp.c

    r5018894e rc597fb1  
    4040}
    4141
    42 static inline bool _Scheduler_EDF_SMP_Less(
    43   const void        *left,
    44   const RBTree_Node *right
    45 )
    46 {
    47   const Priority_Control   *the_left;
    48   const Scheduler_SMP_Node *the_right;
    49   Priority_Control          prio_left;
    50   Priority_Control          prio_right;
    51 
    52   the_left = left;
    53   the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );
    54 
    55   prio_left = *the_left;
    56   prio_right = the_right->priority;
    57 
    58   return prio_left < prio_right;
    59 }
    60 
    61 static inline bool _Scheduler_EDF_SMP_Less_or_equal(
     42static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
    6243  const void        *left,
    6344  const RBTree_Node *right
     
    255236  Scheduler_Context *context,
    256237  Scheduler_Node    *node_base,
    257   int                generation_index,
    258   bool            ( *less )( const void *, const RBTree_Node * )
     238  Priority_Control   insert_priority
    259239)
    260240{
     
    263243  uint32_t                       rqi;
    264244  Scheduler_EDF_SMP_Ready_queue *ready_queue;
     245  int                            generation_index;
    265246  int                            increment;
    266247  int64_t                        generation;
     
    269250  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
    270251  rqi = node->ready_queue_index;
     252  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
    271253  increment = ( generation_index << 1 ) - 1;
    272254  ready_queue = &self->Ready[ rqi ];
     
    280262    &ready_queue->Queue,
    281263    &node->Base.Base.Node.RBTree,
    282     &node->Base.priority,
    283     less
     264    &insert_priority,
     265    _Scheduler_EDF_SMP_Priority_less_equal
    284266  );
    285267
     
    328310)
    329311{
     312  Priority_Control insert_priority;
     313
    330314  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
     315  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
    331316  _Scheduler_EDF_SMP_Insert_ready(
    332317    context,
    333318    scheduled_to_ready,
    334     1,
    335     _Scheduler_EDF_SMP_Less
     319    insert_priority
    336320  );
    337321}
     
    342326)
    343327{
     328  Priority_Control insert_priority;
     329
    344330  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
    345   _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
    346 }
    347 
    348 static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
    349   Scheduler_Context *context,
    350   Scheduler_Node    *node_to_insert
    351 )
    352 {
    353   _Scheduler_EDF_SMP_Insert_ready(
    354     context,
    355     node_to_insert,
    356     0,
    357     _Scheduler_EDF_SMP_Less_or_equal
    358   );
    359 }
    360 
    361 static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
    362   Scheduler_Context *context,
    363   Scheduler_Node    *node_to_insert
    364 )
    365 {
    366   _Scheduler_EDF_SMP_Insert_ready(
    367     context,
    368     node_to_insert,
    369     1,
    370     _Scheduler_EDF_SMP_Less
     331  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
     332  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
     333  _Scheduler_SMP_Insert_scheduled(
     334    context,
     335    ready_to_scheduled,
     336    insert_priority
    371337  );
    372338}
     
    445411}
    446412
    447 static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
    448   Scheduler_Context    *context,
    449   Scheduler_Node       *node,
    450   Chain_Node_order      order,
    451   Scheduler_SMP_Insert  insert_ready,
    452   Scheduler_SMP_Insert  insert_scheduled
    453 )
    454 {
    455   return _Scheduler_SMP_Enqueue_ordered(
    456     context,
    457     node,
    458     order,
    459     insert_ready,
    460     insert_scheduled,
     413static inline bool _Scheduler_EDF_SMP_Enqueue(
     414  Scheduler_Context *context,
     415  Scheduler_Node    *node,
     416  Priority_Control   insert_priority
     417)
     418{
     419  return _Scheduler_SMP_Enqueue(
     420    context,
     421    node,
     422    insert_priority,
     423    _Scheduler_SMP_Priority_less_equal,
     424    _Scheduler_EDF_SMP_Insert_ready,
     425    _Scheduler_SMP_Insert_scheduled,
    461426    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    462427    _Scheduler_EDF_SMP_Get_lowest_scheduled,
     
    465430}
    466431
    467 static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
    468   Scheduler_Context *context,
    469   Scheduler_Node    *node
    470 )
    471 {
    472   return _Scheduler_EDF_SMP_Enqueue_ordered(
    473     context,
    474     node,
    475     _Scheduler_SMP_Insert_priority_lifo_order,
    476     _Scheduler_EDF_SMP_Insert_ready_lifo,
    477     _Scheduler_SMP_Insert_scheduled_lifo
    478   );
    479 }
    480 
    481 static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
    482   Scheduler_Context *context,
    483   Scheduler_Node    *node
    484 )
    485 {
    486   return _Scheduler_EDF_SMP_Enqueue_ordered(
    487     context,
    488     node,
    489     _Scheduler_SMP_Insert_priority_fifo_order,
    490     _Scheduler_EDF_SMP_Insert_ready_fifo,
    491     _Scheduler_SMP_Insert_scheduled_fifo
    492   );
    493 }
    494 
    495 static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    496   Scheduler_Context *context,
    497   Scheduler_Node *node,
    498   Chain_Node_order order,
    499   Scheduler_SMP_Insert insert_ready,
    500   Scheduler_SMP_Insert insert_scheduled
    501 )
    502 {
    503   return _Scheduler_SMP_Enqueue_scheduled_ordered(
    504     context,
    505     node,
    506     order,
     432static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
     433  Scheduler_Context *context,
     434  Scheduler_Node    *node,
     435  Priority_Control   insert_priority
     436)
     437{
     438  return _Scheduler_SMP_Enqueue_scheduled(
     439    context,
     440    node,
     441    insert_priority,
     442    _Scheduler_SMP_Priority_less_equal,
    507443    _Scheduler_EDF_SMP_Extract_from_ready,
    508444    _Scheduler_EDF_SMP_Get_highest_ready,
    509     insert_ready,
    510     insert_scheduled,
     445    _Scheduler_EDF_SMP_Insert_ready,
     446    _Scheduler_SMP_Insert_scheduled,
    511447    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    512448    _Scheduler_EDF_SMP_Allocate_processor
     
    514450}
    515451
    516 static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
    517   Scheduler_Context *context,
    518   Scheduler_Node *node
    519 )
    520 {
    521   return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    522     context,
    523     node,
    524     _Scheduler_SMP_Insert_priority_lifo_order,
    525     _Scheduler_EDF_SMP_Insert_ready_lifo,
    526     _Scheduler_SMP_Insert_scheduled_lifo
    527   );
    528 }
    529 
    530 static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
    531   Scheduler_Context *context,
    532   Scheduler_Node *node
    533 )
    534 {
    535   return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    536     context,
    537     node,
    538     _Scheduler_SMP_Insert_priority_fifo_order,
    539     _Scheduler_EDF_SMP_Insert_ready_fifo,
    540     _Scheduler_SMP_Insert_scheduled_fifo
    541   );
    542 }
    543 
    544452void _Scheduler_EDF_SMP_Unblock(
    545453  const Scheduler_Control *scheduler,
     
    555463    node,
    556464    _Scheduler_EDF_SMP_Do_update,
    557     _Scheduler_EDF_SMP_Enqueue_fifo
     465    _Scheduler_EDF_SMP_Enqueue
    558466  );
    559467}
     
    569477    the_thread,
    570478    node,
    571     _Scheduler_SMP_Insert_priority_lifo_order,
    572     _Scheduler_EDF_SMP_Insert_ready_lifo,
    573     _Scheduler_SMP_Insert_scheduled_lifo,
     479    _Scheduler_SMP_Priority_less_equal,
     480    _Scheduler_EDF_SMP_Insert_ready,
     481    _Scheduler_SMP_Insert_scheduled,
    574482    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    575483    _Scheduler_EDF_SMP_Get_lowest_scheduled,
     
    592500    _Scheduler_EDF_SMP_Extract_from_ready,
    593501    _Scheduler_EDF_SMP_Do_update,
    594     _Scheduler_EDF_SMP_Enqueue_fifo,
    595     _Scheduler_EDF_SMP_Enqueue_lifo,
    596     _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
    597     _Scheduler_EDF_SMP_Enqueue_scheduled_lifo,
     502    _Scheduler_EDF_SMP_Enqueue,
     503    _Scheduler_EDF_SMP_Enqueue_scheduled,
    598504    _Scheduler_EDF_SMP_Do_ask_for_help
    599505  );
     
    673579    idle,
    674580    _Scheduler_EDF_SMP_Has_ready,
    675     _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
     581    _Scheduler_EDF_SMP_Enqueue_scheduled,
    676582    _Scheduler_EDF_SMP_Register_idle
    677583  );
     
    689595    cpu,
    690596    _Scheduler_EDF_SMP_Extract_from_ready,
    691     _Scheduler_EDF_SMP_Enqueue_fifo
     597    _Scheduler_EDF_SMP_Enqueue
    692598  );
    693599}
     
    706612    node,
    707613    _Scheduler_EDF_SMP_Extract_from_ready,
    708     _Scheduler_EDF_SMP_Enqueue_fifo,
    709     _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
     614    _Scheduler_EDF_SMP_Enqueue,
     615    _Scheduler_EDF_SMP_Enqueue_scheduled
    710616  );
    711617}
     
    778684    _Scheduler_EDF_SMP_Get_highest_ready,
    779685    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    780     _Scheduler_EDF_SMP_Enqueue_fifo,
     686    _Scheduler_EDF_SMP_Enqueue,
    781687    _Scheduler_EDF_SMP_Allocate_processor
    782688  );
  • cpukit/score/src/scheduleredfunblock.c

    r5018894e rc597fb1  
    3232  Scheduler_EDF_Node    *the_node;
    3333  Priority_Control       priority;
    34   bool                   prepend_it;
     34  Priority_Control       insert_priority;
    3535
    3636  context = _Scheduler_EDF_Get_context( scheduler );
    3737  the_node = _Scheduler_EDF_Node_downcast( node );
    38   priority = _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
    39   (void) prepend_it;
     38  priority = _Scheduler_Node_get_priority( &the_node->Base );
     39  priority = SCHEDULER_PRIORITY_PURIFY( priority );
     40  insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    4041
    4142  the_node->priority = priority;
    42   _Scheduler_EDF_Enqueue( context, the_node, priority );
     43  _Scheduler_EDF_Enqueue( context, the_node, insert_priority );
    4344
    4445  /*
  • cpukit/score/src/schedulerpriority.c

    r5018894e rc597fb1  
    2020
    2121#include <rtems/score/schedulerpriorityimpl.h>
    22 #include <rtems/score/wkspace.h>
    2322
    2423void _Scheduler_priority_Initialize( const Scheduler_Control *scheduler )
     
    5049  _Scheduler_priority_Ready_queue_update(
    5150    &the_node->Ready_queue,
    52     priority,
     51    SCHEDULER_PRIORITY_UNMAP( priority ),
    5352    &context->Bit_map,
    5453    &context->Ready[ 0 ]
  • cpukit/score/src/schedulerpriorityaffinitysmp.c

    r5018894e rc597fb1  
    4040 */
    4141
    42 static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
     42static bool _Scheduler_priority_affinity_SMP_Priority_less_equal(
    4343  const void       *to_insert,
    4444  const Chain_Node *next
     
    4646{
    4747  return next != NULL
    48     && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
    49 }
    50 
    51 static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
    52   const void       *to_insert,
    53   const Chain_Node *next
    54 )
    55 {
    56   return next != NULL
    57     && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
     48    && _Scheduler_SMP_Priority_less_equal( to_insert, next );
    5849}
    5950
     
    243234 * This method is unique to this scheduler because it must pass
    244235 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
    245  * _Scheduler_SMP_Enqueue_ordered.
     236 * _Scheduler_SMP_Enqueue.
    246237 */
    247238static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
    248239  Scheduler_Context *context,
    249   Scheduler_Node    *node
    250 )
    251 {
    252   return _Scheduler_SMP_Enqueue_ordered(
    253     context,
    254     node,
    255     _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    256     _Scheduler_priority_SMP_Insert_ready_fifo,
    257     _Scheduler_SMP_Insert_scheduled_fifo,
     240  Scheduler_Node    *node,
     241  Priority_Control   insert_priority
     242)
     243{
     244  return _Scheduler_SMP_Enqueue(
     245    context,
     246    node,
     247    insert_priority,
     248    _Scheduler_priority_affinity_SMP_Priority_less_equal,
     249    _Scheduler_priority_SMP_Insert_ready,
     250    _Scheduler_SMP_Insert_scheduled,
    258251    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    259252    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
     
    281274  while (1) {
    282275    Priority_Control lowest_scheduled_priority;
     276    Priority_Control insert_priority;
    283277
    284278    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
     
    313307
    314308    if (
    315       _Scheduler_SMP_Insert_priority_lifo_order(
     309      _Scheduler_SMP_Priority_less_equal(
    316310        &lowest_scheduled_priority,
    317311        &highest_ready->Node.Chain
     
    327321
    328322    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
     323    insert_priority = _Scheduler_SMP_Node_priority( highest_ready );
     324    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    329325    _Scheduler_SMP_Enqueue_to_scheduled(
    330326      context,
    331327      highest_ready,
     328      insert_priority,
    332329      lowest_scheduled,
    333       _Scheduler_SMP_Insert_scheduled_fifo,
     330      _Scheduler_SMP_Insert_scheduled,
    334331      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    335332      _Scheduler_SMP_Allocate_processor_exact
     
    365362/*
    366363 *  This is unique to this scheduler because it passes scheduler specific
    367  *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
    368  */
    369 static bool _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    370   Scheduler_Context     *context,
    371   Scheduler_Node        *node,
    372   Chain_Node_order       order,
    373   Scheduler_SMP_Insert   insert_ready,
    374   Scheduler_SMP_Insert   insert_scheduled
    375 )
    376 {
    377   return _Scheduler_SMP_Enqueue_ordered(
    378     context,
    379     node,
    380     order,
    381     insert_ready,
    382     insert_scheduled,
     364 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue.
     365 */
     366static bool _Scheduler_priority_affinity_SMP_Enqueue(
     367  Scheduler_Context *context,
     368  Scheduler_Node    *node,
     369  Priority_Control   insert_priority
     370)
     371{
     372  return _Scheduler_SMP_Enqueue(
     373    context,
     374    node,
     375    insert_priority,
     376    _Scheduler_priority_affinity_SMP_Priority_less_equal,
     377    _Scheduler_priority_SMP_Insert_ready,
     378    _Scheduler_SMP_Insert_scheduled,
    383379    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    384380    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
     
    388384
    389385/*
    390  *  This is unique to this scheduler because it is on the path
    391  *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
    392  *  invokes a scheduler unique get_lowest_scheduled helper.
    393  */
    394 static bool _Scheduler_priority_affinity_SMP_Enqueue_lifo(
     386 * This method is unique to this scheduler because it must
     387 * invoke _Scheduler_SMP_Enqueue_scheduled() with
     388 * this scheduler's get_highest_ready() helper.
     389 */
     390static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
    395391  Scheduler_Context *context,
    396   Scheduler_Node    *node
    397 )
    398 {
    399   return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    400     context,
    401     node,
    402     _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    403     _Scheduler_priority_SMP_Insert_ready_lifo,
    404     _Scheduler_SMP_Insert_scheduled_lifo
    405   );
    406 }
    407 
    408 /*
    409  * This method is unique to this scheduler because it must
    410  * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
    411  * this scheduler's get_highest_ready() helper.
    412  */
    413 static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    414   Scheduler_Context    *context,
    415   Scheduler_Node       *node,
    416   Chain_Node_order      order,
    417   Scheduler_SMP_Insert  insert_ready,
    418   Scheduler_SMP_Insert  insert_scheduled
    419 )
    420 {
    421   return _Scheduler_SMP_Enqueue_scheduled_ordered(
    422     context,
    423     node,
    424     order,
     392  Scheduler_Node    *node,
     393  Priority_Control   insert_priority
     394)
     395{
     396  return _Scheduler_SMP_Enqueue_scheduled(
     397    context,
     398    node,
     399    insert_priority,
     400    _Scheduler_SMP_Priority_less_equal,
    425401    _Scheduler_priority_SMP_Extract_from_ready,
    426402    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    427     insert_ready,
    428     insert_scheduled,
     403    _Scheduler_priority_SMP_Insert_ready,
     404    _Scheduler_SMP_Insert_scheduled,
    429405    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    430406    _Scheduler_SMP_Allocate_processor_exact
    431   );
    432 }
    433 
    434 /*
    435  *  This is unique to this scheduler because it is on the path
    436  *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
    437  *  invokes a scheduler unique get_lowest_scheduled helper.
    438  */
    439 static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
    440   Scheduler_Context *context,
    441   Scheduler_Node    *node
    442 )
    443 {
    444   return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    445     context,
    446     node,
    447     _Scheduler_SMP_Insert_priority_lifo_order,
    448     _Scheduler_priority_SMP_Insert_ready_lifo,
    449     _Scheduler_SMP_Insert_scheduled_lifo
    450   );
    451 }
    452 
    453 /*
    454  *  This is unique to this scheduler because it is on the path
    455  *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
    456  *  invokes a scheduler unique get_lowest_scheduled helper.
    457  */
    458 static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
    459   Scheduler_Context *context,
    460   Scheduler_Node    *node
    461 )
    462 {
    463   return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    464     context,
    465     node,
    466     _Scheduler_SMP_Insert_priority_fifo_order,
    467     _Scheduler_priority_SMP_Insert_ready_fifo,
    468     _Scheduler_SMP_Insert_scheduled_fifo
    469407  );
    470408}
     
    480418    the_thread,
    481419    node,
    482     _Scheduler_SMP_Insert_priority_lifo_order,
    483     _Scheduler_priority_SMP_Insert_ready_lifo,
    484     _Scheduler_SMP_Insert_scheduled_lifo,
     420    _Scheduler_SMP_Priority_less_equal,
     421    _Scheduler_priority_SMP_Insert_ready,
     422    _Scheduler_SMP_Insert_scheduled,
    485423    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    486424    _Scheduler_SMP_Get_lowest_scheduled,
     
    503441    _Scheduler_priority_SMP_Extract_from_ready,
    504442    _Scheduler_priority_SMP_Do_update,
    505     _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    506     _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    507     _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    508     _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
     443    _Scheduler_priority_affinity_SMP_Enqueue,
     444    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    509445    _Scheduler_priority_affinity_SMP_Do_ask_for_help
    510446  );
     
    575511    idle,
    576512    _Scheduler_priority_SMP_Has_ready,
    577     _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
     513    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    578514    _Scheduler_SMP_Do_nothing_register_idle
    579515  );
     
    591527    cpu,
    592528    _Scheduler_priority_SMP_Extract_from_ready,
    593     _Scheduler_priority_affinity_SMP_Enqueue_fifo
     529    _Scheduler_priority_affinity_SMP_Enqueue
    594530  );
    595531}
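The hunks above and below lean on new priority helpers declared in cpukit/score/include/rtems/score/priority.h, whose definitions are not visible in this part of the changeset. The sketch below shows one plausible encoding, assuming the append/prepend request lives in the least-significant bit and the plain priority occupies the upper 63 bits; the real macros may differ in detail.

    /* Illustrative sketch only -- not the actual priority.h contents. */
    #include <stdint.h>

    typedef uint64_t Priority_Control;

    /* Make room for the append bit below the plain priority. */
    #define SCHEDULER_PRIORITY_MAP( priority )    ( ( priority ) << 1 )

    /* Recover the plain priority value. */
    #define SCHEDULER_PRIORITY_UNMAP( priority )  ( ( priority ) >> 1 )

    /* Request insertion behind the priority group (append). */
    #define SCHEDULER_PRIORITY_APPEND( priority ) ( ( priority ) | 1 )

    /* Test for an append request; a clear bit means prepend. */
    #define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
      ( ( ( priority ) & 1 ) != 0 )

With a single encoded Priority_Control carrying both the value and the append request, each scheduler needs only one Enqueue and one Enqueue_scheduled helper instead of the former _fifo/_lifo pairs, which is what the remaining hunks do.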
  • cpukit/score/src/schedulerprioritychangepriority.c

    r5018894e rc597fb1  
    3030  Scheduler_priority_Context *context;
    3131  Scheduler_priority_Node    *the_node;
    32   unsigned int                priority;
    33   bool                        prepend_it;
     32  unsigned int                new_priority;
     33  unsigned int                unmapped_priority;
    3434
    3535  if ( !_Thread_Is_ready( the_thread ) ) {
     
    3939
    4040  the_node = _Scheduler_priority_Node_downcast( node );
    41   priority = (unsigned int )
    42     _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
     41  new_priority = (unsigned int)
     42    _Scheduler_Node_get_priority( &the_node->Base );
     43  unmapped_priority = SCHEDULER_PRIORITY_UNMAP( new_priority );
    4344
    44   if ( priority == the_node->Ready_queue.current_priority ) {
     45  if ( unmapped_priority == the_node->Ready_queue.current_priority ) {
    4546    /* Nothing to do */
    4647    return;
     
    5758  _Scheduler_priority_Ready_queue_update(
    5859    &the_node->Ready_queue,
    59     priority,
     60    unmapped_priority,
    6061    &context->Bit_map,
    6162    &context->Ready[ 0 ]
    6263  );
    6364
    64   if ( prepend_it ) {
    65     _Scheduler_priority_Ready_queue_enqueue_first(
     65  if ( SCHEDULER_PRIORITY_IS_APPEND( new_priority ) ) {
     66    _Scheduler_priority_Ready_queue_enqueue(
    6667      &the_thread->Object.Node,
    6768      &the_node->Ready_queue,
     
    6970    );
    7071  } else {
    71     _Scheduler_priority_Ready_queue_enqueue(
     72    _Scheduler_priority_Ready_queue_enqueue_first(
    7273      &the_thread->Object.Node,
    7374      &the_node->Ready_queue,
  • cpukit/score/src/schedulerprioritysmp.c

    r5018894e rc597fb1  
    6969  _Scheduler_priority_Ready_queue_update(
    7070    &the_node->Ready_queue,
    71     priority,
     71    SCHEDULER_PRIORITY_UNMAP( priority ),
    7272    &self->Bit_map,
    7373    &self->Ready[ 0 ]
     
    110110}
    111111
    112 static bool _Scheduler_priority_SMP_Enqueue_ordered(
    113   Scheduler_Context    *context,
    114   Scheduler_Node       *node,
    115   Chain_Node_order      order,
    116   Scheduler_SMP_Insert  insert_ready,
    117   Scheduler_SMP_Insert  insert_scheduled
    118 )
    119 {
    120   return _Scheduler_SMP_Enqueue_ordered(
    121     context,
    122     node,
    123     order,
    124     insert_ready,
    125     insert_scheduled,
     112static bool _Scheduler_priority_SMP_Enqueue(
     113  Scheduler_Context *context,
     114  Scheduler_Node    *node,
     115  Priority_Control   insert_priority
     116)
     117{
     118  return _Scheduler_SMP_Enqueue(
     119    context,
     120    node,
     121    insert_priority,
     122    _Scheduler_SMP_Priority_less_equal,
     123    _Scheduler_priority_SMP_Insert_ready,
     124    _Scheduler_SMP_Insert_scheduled,
    126125    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    127126    _Scheduler_SMP_Get_lowest_scheduled,
     
    130129}
    131130
    132 static bool _Scheduler_priority_SMP_Enqueue_lifo(
     131static bool _Scheduler_priority_SMP_Enqueue_scheduled(
    133132  Scheduler_Context *context,
    134   Scheduler_Node    *node
    135 )
    136 {
    137   return _Scheduler_priority_SMP_Enqueue_ordered(
    138     context,
    139     node,
    140     _Scheduler_SMP_Insert_priority_lifo_order,
    141     _Scheduler_priority_SMP_Insert_ready_lifo,
    142     _Scheduler_SMP_Insert_scheduled_lifo
    143   );
    144 }
    145 
    146 static bool _Scheduler_priority_SMP_Enqueue_fifo(
    147   Scheduler_Context *context,
    148   Scheduler_Node    *node
    149 )
    150 {
    151   return _Scheduler_priority_SMP_Enqueue_ordered(
    152     context,
    153     node,
    154     _Scheduler_SMP_Insert_priority_fifo_order,
    155     _Scheduler_priority_SMP_Insert_ready_fifo,
    156     _Scheduler_SMP_Insert_scheduled_fifo
    157   );
    158 }
    159 
    160 static bool _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
    161   Scheduler_Context *context,
    162   Scheduler_Node *node,
    163   Chain_Node_order order,
    164   Scheduler_SMP_Insert insert_ready,
    165   Scheduler_SMP_Insert insert_scheduled
    166 )
    167 {
    168   return _Scheduler_SMP_Enqueue_scheduled_ordered(
    169     context,
    170     node,
    171     order,
     133  Scheduler_Node    *node,
     134  Priority_Control   insert_priority
     135)
     136{
     137  return _Scheduler_SMP_Enqueue_scheduled(
     138    context,
     139    node,
     140    insert_priority,
     141    _Scheduler_SMP_Priority_less_equal,
    172142    _Scheduler_priority_SMP_Extract_from_ready,
    173143    _Scheduler_priority_SMP_Get_highest_ready,
    174     insert_ready,
    175     insert_scheduled,
     144    _Scheduler_priority_SMP_Insert_ready,
     145    _Scheduler_SMP_Insert_scheduled,
    176146    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    177147    _Scheduler_SMP_Allocate_processor_lazy
     
    179149}
    180150
    181 static bool _Scheduler_priority_SMP_Enqueue_scheduled_lifo(
    182   Scheduler_Context *context,
    183   Scheduler_Node *node
    184 )
    185 {
    186   return _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
    187     context,
    188     node,
    189     _Scheduler_SMP_Insert_priority_lifo_order,
    190     _Scheduler_priority_SMP_Insert_ready_lifo,
    191     _Scheduler_SMP_Insert_scheduled_lifo
    192   );
    193 }
    194 
    195 static bool _Scheduler_priority_SMP_Enqueue_scheduled_fifo(
    196   Scheduler_Context *context,
    197   Scheduler_Node *node
    198 )
    199 {
    200   return _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
    201     context,
    202     node,
    203     _Scheduler_SMP_Insert_priority_fifo_order,
    204     _Scheduler_priority_SMP_Insert_ready_fifo,
    205     _Scheduler_SMP_Insert_scheduled_fifo
    206   );
    207 }
    208 
    209151void _Scheduler_priority_SMP_Unblock(
    210152  const Scheduler_Control *scheduler,
     
    220162    node,
    221163    _Scheduler_priority_SMP_Do_update,
    222     _Scheduler_priority_SMP_Enqueue_fifo
     164    _Scheduler_priority_SMP_Enqueue
    223165  );
    224166}
     
    234176    the_thread,
    235177    node,
    236     _Scheduler_SMP_Insert_priority_lifo_order,
    237     _Scheduler_priority_SMP_Insert_ready_lifo,
    238     _Scheduler_SMP_Insert_scheduled_lifo,
     178    _Scheduler_SMP_Priority_less_equal,
     179    _Scheduler_priority_SMP_Insert_ready,
     180    _Scheduler_SMP_Insert_scheduled,
    239181    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    240182    _Scheduler_SMP_Get_lowest_scheduled,
     
    257199    _Scheduler_priority_SMP_Extract_from_ready,
    258200    _Scheduler_priority_SMP_Do_update,
    259     _Scheduler_priority_SMP_Enqueue_fifo,
    260     _Scheduler_priority_SMP_Enqueue_lifo,
    261     _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
    262     _Scheduler_priority_SMP_Enqueue_scheduled_lifo,
     201    _Scheduler_priority_SMP_Enqueue,
     202    _Scheduler_priority_SMP_Enqueue_scheduled,
    263203    _Scheduler_priority_SMP_Do_ask_for_help
    264204  );
     
    324264    idle,
    325265    _Scheduler_priority_SMP_Has_ready,
    326     _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
     266    _Scheduler_priority_SMP_Enqueue_scheduled,
    327267    _Scheduler_SMP_Do_nothing_register_idle
    328268  );
     
    340280    cpu,
    341281    _Scheduler_priority_SMP_Extract_from_ready,
    342     _Scheduler_priority_SMP_Enqueue_fifo
     282    _Scheduler_priority_SMP_Enqueue
    343283  );
    344284}
     
    357297    node,
    358298    _Scheduler_priority_SMP_Extract_from_ready,
    359     _Scheduler_priority_SMP_Enqueue_fifo,
    360     _Scheduler_priority_SMP_Enqueue_scheduled_fifo
    361   );
    362 }
     299    _Scheduler_priority_SMP_Enqueue,
     300    _Scheduler_priority_SMP_Enqueue_scheduled
     301  );
     302}
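In the SMP variants a single comparison, _Scheduler_SMP_Priority_less_equal, now orders both the ready and the scheduled chains, so the former Chain_Node_order arguments and the _lifo/_fifo insert helpers disappear. The standalone toy below (hypothetical, not the RTEMS chain API) illustrates why a rule along the lines of "insert before the first node whose key the new key is less than or equal to" is enough, assuming nodes already on the chain are compared by their mapped priority without the append bit: an append-encoded key is strictly greater than its equal-priority peers and lands behind them, while a prepend key ties and lands in front.

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    typedef uint64_t Priority_Control;

    #define SCHEDULER_PRIORITY_APPEND( p ) ( ( p ) | 1 )

    /* Encoded keys already on a ready chain, ascending: two nodes at plain
       priority 5 (5 << 1 == 10) and one at plain priority 7 (14). */
    static const Priority_Control ready[] = { 10, 10, 14 };

    /* Hypothetical rule: insert before the first node for which
       new_key <= existing_key holds. */
    static size_t insert_index( Priority_Control new_key )
    {
      size_t i;

      for ( i = 0; i < sizeof( ready ) / sizeof( ready[ 0 ] ); ++i ) {
        if ( new_key <= ready[ i ] ) {
          break;
        }
      }

      return i;
    }

    int main( void )
    {
      Priority_Control prepend_key = 5 << 1;                              /* 10 */
      Priority_Control append_key  = SCHEDULER_PRIORITY_APPEND( 5 << 1 ); /* 11 */

      /* Prepend ties with its group and goes to index 0 (head of the group);
         append is strictly greater and goes to index 2 (behind the group). */
      printf( "prepend -> %zu, append -> %zu\n",
              insert_index( prepend_key ), insert_index( append_key ) );
      return 0;
    }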
  • cpukit/score/src/schedulerpriorityunblock.c

    r5018894e rc597fb1  
    3232  Scheduler_priority_Node    *the_node;
    3333  unsigned int                priority;
    34   bool                        prepend_it;
     34  unsigned int                unmapped_priority;
    3535
    3636  context = _Scheduler_priority_Get_context( scheduler );
    3737  the_node = _Scheduler_priority_Node_downcast( node );
    38   priority = (unsigned int )
    39     _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
    40   (void) prepend_it;
     38  priority = (unsigned int ) _Scheduler_Node_get_priority( &the_node->Base );
     39  unmapped_priority = SCHEDULER_PRIORITY_UNMAP( priority );
    4140
    42   if ( priority != the_node->Ready_queue.current_priority ) {
     41  if ( unmapped_priority != the_node->Ready_queue.current_priority ) {
    4342    _Scheduler_priority_Ready_queue_update(
    4443      &the_node->Ready_queue,
    45       priority,
     44      unmapped_priority,
    4645      &context->Bit_map,
    4746      &context->Ready[ 0 ]
  • cpukit/score/src/schedulersimplechangepriority.c

    r5018894e rc597fb1  
    2929{
    3030  Scheduler_simple_Context *context;
    31   bool                      prepend_it;
     31  unsigned int              new_priority;
    3232
    3333  if ( !_Thread_Is_ready( the_thread ) ) {
     
    3737
    3838  context = _Scheduler_simple_Get_context( scheduler );
    39   _Scheduler_Node_get_priority( node, &prepend_it );
     39  new_priority = (unsigned int ) _Scheduler_Node_get_priority( node );
    4040
    4141  _Scheduler_simple_Extract( scheduler, the_thread, node );
    42 
    43   if ( prepend_it ) {
    44     _Scheduler_simple_Insert_priority_lifo( &context->Ready, the_thread );
    45   } else {
    46     _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
    47   }
    48 
     42  _Scheduler_simple_Insert( &context->Ready, the_thread, new_priority );
    4943  _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
    5044}
  • cpukit/score/src/schedulersimplesmp.c

    r5018894e rc597fb1  
    100100{
    101101  Scheduler_simple_SMP_Context *self;
    102   Priority_Control              priority_to_insert;
     102  Priority_Control              insert_priority;
    103103
    104104  self = _Scheduler_simple_SMP_Get_self( context );
    105   priority_to_insert = _Scheduler_SMP_Node_priority( scheduled_to_ready );
    106105
    107106  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
     107  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
    108108  _Chain_Insert_ordered_unprotected(
    109109    &self->Ready,
    110110    &scheduled_to_ready->Node.Chain,
    111     &priority_to_insert,
    112     _Scheduler_SMP_Insert_priority_lifo_order
     111    &insert_priority,
     112    _Scheduler_SMP_Priority_less_equal
    113113  );
    114114}
     
    120120{
    121121  Scheduler_simple_SMP_Context *self;
    122   Priority_Control              priority_to_insert;
     122  Priority_Control              insert_priority;
    123123
    124124  self = _Scheduler_simple_SMP_Get_self( context );
    125   priority_to_insert = _Scheduler_SMP_Node_priority( ready_to_scheduled );
    126125
    127126  _Chain_Extract_unprotected( &ready_to_scheduled->Node.Chain );
     127  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
     128  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    128129  _Chain_Insert_ordered_unprotected(
    129130    &self->Base.Scheduled,
    130131    &ready_to_scheduled->Node.Chain,
    131     &priority_to_insert,
    132     _Scheduler_SMP_Insert_priority_fifo_order
    133   );
    134 }
    135 
    136 static void _Scheduler_simple_SMP_Insert_ready_lifo(
    137   Scheduler_Context *context,
    138   Scheduler_Node    *node_to_insert
     132    &insert_priority,
     133    _Scheduler_SMP_Priority_less_equal
     134  );
     135}
     136
     137static void _Scheduler_simple_SMP_Insert_ready(
     138  Scheduler_Context *context,
     139  Scheduler_Node    *node_to_insert,
     140  Priority_Control   insert_priority
    139141)
    140142{
    141143  Scheduler_simple_SMP_Context *self;
    142   Priority_Control              priority_to_insert;
    143144
    144145  self = _Scheduler_simple_SMP_Get_self( context );
    145   priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
    146146
    147147  _Chain_Insert_ordered_unprotected(
    148148    &self->Ready,
    149149    &node_to_insert->Node.Chain,
    150     &priority_to_insert,
    151     _Scheduler_SMP_Insert_priority_lifo_order
    152   );
    153 }
    154 
    155 static void _Scheduler_simple_SMP_Insert_ready_fifo(
    156   Scheduler_Context *context,
    157   Scheduler_Node    *node_to_insert
    158 )
    159 {
    160   Scheduler_simple_SMP_Context *self;
    161   Priority_Control              priority_to_insert;
    162 
    163   self = _Scheduler_simple_SMP_Get_self( context );
    164   priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
    165 
    166   _Chain_Insert_ordered_unprotected(
    167     &self->Ready,
    168     &node_to_insert->Node.Chain,
    169     &priority_to_insert,
    170     _Scheduler_SMP_Insert_priority_fifo_order
     150    &insert_priority,
     151    _Scheduler_SMP_Priority_less_equal
    171152  );
    172153}
     
    201182}
    202183
    203 static bool _Scheduler_simple_SMP_Enqueue_ordered(
    204   Scheduler_Context    *context,
    205   Scheduler_Node       *node,
    206   Chain_Node_order      order,
    207   Scheduler_SMP_Insert  insert_ready,
    208   Scheduler_SMP_Insert  insert_scheduled
    209 )
    210 {
    211   return _Scheduler_SMP_Enqueue_ordered(
    212     context,
    213     node,
    214     order,
    215     insert_ready,
    216     insert_scheduled,
     184static bool _Scheduler_simple_SMP_Enqueue(
     185  Scheduler_Context *context,
     186  Scheduler_Node    *node,
     187  Priority_Control   insert_priority
     188)
     189{
     190  return _Scheduler_SMP_Enqueue(
     191    context,
     192    node,
     193    insert_priority,
     194    _Scheduler_SMP_Priority_less_equal,
     195    _Scheduler_simple_SMP_Insert_ready,
     196    _Scheduler_SMP_Insert_scheduled,
    217197    _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
    218198    _Scheduler_SMP_Get_lowest_scheduled,
     
    221201}
    222202
    223 static bool _Scheduler_simple_SMP_Enqueue_lifo(
    224   Scheduler_Context *context,
    225   Scheduler_Node    *node
    226 )
    227 {
    228   return _Scheduler_simple_SMP_Enqueue_ordered(
    229     context,
    230     node,
    231     _Scheduler_SMP_Insert_priority_lifo_order,
    232     _Scheduler_simple_SMP_Insert_ready_lifo,
    233     _Scheduler_SMP_Insert_scheduled_lifo
    234   );
    235 }
    236 
    237 static bool _Scheduler_simple_SMP_Enqueue_fifo(
    238   Scheduler_Context *context,
    239   Scheduler_Node    *node
    240 )
    241 {
    242   return _Scheduler_simple_SMP_Enqueue_ordered(
    243     context,
    244     node,
    245     _Scheduler_SMP_Insert_priority_fifo_order,
    246     _Scheduler_simple_SMP_Insert_ready_fifo,
    247     _Scheduler_SMP_Insert_scheduled_fifo
    248   );
    249 }
    250 
    251 static bool _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
    252   Scheduler_Context *context,
    253   Scheduler_Node *node,
    254   Chain_Node_order order,
    255   Scheduler_SMP_Insert insert_ready,
    256   Scheduler_SMP_Insert insert_scheduled
    257 )
    258 {
    259   return _Scheduler_SMP_Enqueue_scheduled_ordered(
    260     context,
    261     node,
    262     order,
     203static bool _Scheduler_simple_SMP_Enqueue_scheduled(
     204  Scheduler_Context *context,
     205  Scheduler_Node    *node,
     206  Priority_Control   insert_priority
     207)
     208{
     209  return _Scheduler_SMP_Enqueue_scheduled(
     210    context,
     211    node,
     212    insert_priority,
     213    _Scheduler_SMP_Priority_less_equal,
    263214    _Scheduler_simple_SMP_Extract_from_ready,
    264215    _Scheduler_simple_SMP_Get_highest_ready,
    265     insert_ready,
    266     insert_scheduled,
     216    _Scheduler_simple_SMP_Insert_ready,
     217    _Scheduler_SMP_Insert_scheduled,
    267218    _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
    268219    _Scheduler_SMP_Allocate_processor_lazy
     
    270221}
    271222
    272 static bool _Scheduler_simple_SMP_Enqueue_scheduled_lifo(
    273   Scheduler_Context *context,
    274   Scheduler_Node *node
    275 )
    276 {
    277   return _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
    278     context,
    279     node,
    280     _Scheduler_SMP_Insert_priority_lifo_order,
    281     _Scheduler_simple_SMP_Insert_ready_lifo,
    282     _Scheduler_SMP_Insert_scheduled_lifo
    283   );
    284 }
    285 
    286 static bool _Scheduler_simple_SMP_Enqueue_scheduled_fifo(
    287   Scheduler_Context *context,
    288   Scheduler_Node *node
    289 )
    290 {
    291   return _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
    292     context,
    293     node,
    294     _Scheduler_SMP_Insert_priority_fifo_order,
    295     _Scheduler_simple_SMP_Insert_ready_fifo,
    296     _Scheduler_SMP_Insert_scheduled_fifo
    297   );
    298 }
    299 
    300223void _Scheduler_simple_SMP_Unblock(
    301224  const Scheduler_Control *scheduler,
     
    311234    node,
    312235    _Scheduler_simple_SMP_Do_update,
    313     _Scheduler_simple_SMP_Enqueue_fifo
     236    _Scheduler_simple_SMP_Enqueue
    314237  );
    315238}
     
    325248    the_thread,
    326249    node,
    327     _Scheduler_SMP_Insert_priority_lifo_order,
    328     _Scheduler_simple_SMP_Insert_ready_lifo,
    329     _Scheduler_SMP_Insert_scheduled_lifo,
     250    _Scheduler_SMP_Priority_less_equal,
     251    _Scheduler_simple_SMP_Insert_ready,
     252    _Scheduler_SMP_Insert_scheduled,
    330253    _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
    331254    _Scheduler_SMP_Get_lowest_scheduled,
     
    348271    _Scheduler_simple_SMP_Extract_from_ready,
    349272    _Scheduler_simple_SMP_Do_update,
    350     _Scheduler_simple_SMP_Enqueue_fifo,
    351     _Scheduler_simple_SMP_Enqueue_lifo,
    352     _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
    353     _Scheduler_simple_SMP_Enqueue_scheduled_lifo,
     273    _Scheduler_simple_SMP_Enqueue,
     274    _Scheduler_simple_SMP_Enqueue_scheduled,
    354275    _Scheduler_simple_SMP_Do_ask_for_help
    355276  );
     
    415336    idle,
    416337    _Scheduler_simple_SMP_Has_ready,
    417     _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
     338    _Scheduler_simple_SMP_Enqueue_scheduled,
    418339    _Scheduler_SMP_Do_nothing_register_idle
    419340  );
     
    431352    cpu,
    432353    _Scheduler_simple_SMP_Extract_from_ready,
    433     _Scheduler_simple_SMP_Enqueue_fifo
     354    _Scheduler_simple_SMP_Enqueue
    434355  );
    435356}
     
    448369    node,
    449370    _Scheduler_simple_SMP_Extract_from_ready,
    450     _Scheduler_simple_SMP_Enqueue_fifo,
    451     _Scheduler_simple_SMP_Enqueue_scheduled_fifo
    452   );
    453 }
     371    _Scheduler_simple_SMP_Enqueue,
     372    _Scheduler_simple_SMP_Enqueue_scheduled
     373  );
     374}
  • cpukit/score/src/schedulersimpleunblock.c

    r5018894e rc597fb1  
    2929{
    3030  Scheduler_simple_Context *context;
    31   Priority_Control          priority;
     31  unsigned int              priority;
     32  unsigned int              insert_priority;
    3233
    3334  (void) node;
    3435
    3536  context = _Scheduler_simple_Get_context( scheduler );
    36   _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
    3737  priority = _Thread_Get_priority( the_thread );
     38  insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
     39  _Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
    3840
    3941  /*
  • cpukit/score/src/schedulersimpleyield.c

    r5018894e rc597fb1  
    2727)
    2828{
    29   Scheduler_simple_Context *context =
    30     _Scheduler_simple_Get_context( scheduler );
     29  Scheduler_simple_Context *context;
     30  unsigned int              insert_priority;
     31
     32  context = _Scheduler_simple_Get_context( scheduler );
    3133
    3234  (void) node;
    3335
    3436  _Chain_Extract_unprotected( &the_thread->Object.Node );
    35   _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
     37  insert_priority = (unsigned int) _Thread_Get_priority( the_thread );
     38  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
     39  _Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
    3640  _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
    3741}
  • cpukit/score/src/schedulerstrongapa.c

    r5018894e rc597fb1  
    6767  Scheduler_strong_APA_Context *self;
    6868  Scheduler_strong_APA_Node    *node;
    69   Priority_Control              priority;
     69  Priority_Control              insert_priority;
    7070
    7171  self = _Scheduler_strong_APA_Get_self( context );
     
    7777    &self->Bit_map
    7878  );
    79   priority = node->Base.priority;
     79  insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
     80  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    8081  _Chain_Insert_ordered_unprotected(
    8182    &self->Base.Scheduled,
    8283    &node->Base.Base.Node.Chain,
    83     &priority,
    84     _Scheduler_SMP_Insert_priority_fifo_order
    85   );
    86 }
    87 
    88 static void _Scheduler_strong_APA_Insert_ready_lifo(
    89   Scheduler_Context *context,
    90   Scheduler_Node    *the_thread
    91 )
    92 {
    93   Scheduler_strong_APA_Context *self =
    94     _Scheduler_strong_APA_Get_self( context );
    95   Scheduler_strong_APA_Node *node =
    96     _Scheduler_strong_APA_Node_downcast( the_thread );
    97 
    98   _Scheduler_priority_Ready_queue_enqueue(
    99     &node->Base.Base.Node.Chain,
    100     &node->Ready_queue,
    101     &self->Bit_map
    102   );
    103 }
    104 
    105 static void _Scheduler_strong_APA_Insert_ready_fifo(
    106   Scheduler_Context *context,
    107   Scheduler_Node    *the_thread
    108 )
    109 {
    110   Scheduler_strong_APA_Context *self =
    111     _Scheduler_strong_APA_Get_self( context );
    112   Scheduler_strong_APA_Node *node =
    113     _Scheduler_strong_APA_Node_downcast( the_thread );
    114 
    115   _Scheduler_priority_Ready_queue_enqueue_first(
    116     &node->Base.Base.Node.Chain,
    117     &node->Ready_queue,
    118     &self->Bit_map
    119   );
     84    &insert_priority,
     85    _Scheduler_SMP_Priority_less_equal
     86  );
     87}
     88
     89static void _Scheduler_strong_APA_Insert_ready(
     90  Scheduler_Context *context,
     91  Scheduler_Node    *node_base,
     92  Priority_Control   insert_priority
     93)
     94{
     95  Scheduler_strong_APA_Context *self;
     96  Scheduler_strong_APA_Node    *node;
     97
     98  self = _Scheduler_strong_APA_Get_self( context );
     99  node = _Scheduler_strong_APA_Node_downcast( node_base );
     100
     101  if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
     102    _Scheduler_priority_Ready_queue_enqueue(
     103      &node->Base.Base.Node.Chain,
     104      &node->Ready_queue,
     105      &self->Bit_map
     106    );
     107  } else {
     108    _Scheduler_priority_Ready_queue_enqueue_first(
     109      &node->Base.Base.Node.Chain,
     110      &node->Ready_queue,
     111      &self->Bit_map
     112    );
     113  }
    120114}
    121115
     
    151145  _Scheduler_priority_Ready_queue_update(
    152146    &node->Ready_queue,
    153     new_priority,
     147    SCHEDULER_PRIORITY_UNMAP( new_priority ),
    154148    &self->Bit_map,
    155149    &self->Ready[ 0 ]
     
    199193  _Scheduler_priority_Ready_queue_update(
    200194    &the_node->Ready_queue,
    201     priority,
     195    SCHEDULER_PRIORITY_UNMAP( priority ),
    202196    &self->Bit_map,
    203197    &self->Ready[ 0 ]
     
    248242}
    249243
    250 static bool _Scheduler_strong_APA_Enqueue_ordered(
    251   Scheduler_Context    *context,
    252   Scheduler_Node       *node,
    253   Chain_Node_order      order,
    254   Scheduler_SMP_Insert  insert_ready,
    255   Scheduler_SMP_Insert  insert_scheduled
    256 )
    257 {
    258   return _Scheduler_SMP_Enqueue_ordered(
    259     context,
    260     node,
    261     order,
    262     insert_ready,
    263     insert_scheduled,
     244static bool _Scheduler_strong_APA_Enqueue(
     245  Scheduler_Context *context,
     246  Scheduler_Node    *node,
     247  Priority_Control   insert_priority
     248)
     249{
     250  return _Scheduler_SMP_Enqueue(
     251    context,
     252    node,
     253    insert_priority,
     254    _Scheduler_SMP_Priority_less_equal,
     255    _Scheduler_strong_APA_Insert_ready,
     256    _Scheduler_SMP_Insert_scheduled,
    264257    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    265258    _Scheduler_SMP_Get_lowest_scheduled,
     
    268261}
    269262
    270 static bool _Scheduler_strong_APA_Enqueue_lifo(
    271   Scheduler_Context *context,
    272   Scheduler_Node    *node
    273 )
    274 {
    275   return _Scheduler_strong_APA_Enqueue_ordered(
    276     context,
    277     node,
    278     _Scheduler_SMP_Insert_priority_lifo_order,
    279     _Scheduler_strong_APA_Insert_ready_lifo,
    280     _Scheduler_SMP_Insert_scheduled_lifo
    281   );
    282 }
    283 
    284 static bool _Scheduler_strong_APA_Enqueue_fifo(
    285   Scheduler_Context *context,
    286   Scheduler_Node    *node
    287 )
    288 {
    289   return _Scheduler_strong_APA_Enqueue_ordered(
    290     context,
    291     node,
    292     _Scheduler_SMP_Insert_priority_fifo_order,
    293     _Scheduler_strong_APA_Insert_ready_fifo,
    294     _Scheduler_SMP_Insert_scheduled_fifo
    295   );
    296 }
    297 
    298 static bool _Scheduler_strong_APA_Enqueue_scheduled_ordered(
    299   Scheduler_Context    *context,
    300   Scheduler_Node       *node,
    301   Chain_Node_order      order,
    302   Scheduler_SMP_Insert  insert_ready,
    303   Scheduler_SMP_Insert  insert_scheduled
    304 )
    305 {
    306   return _Scheduler_SMP_Enqueue_scheduled_ordered(
    307     context,
    308     node,
    309     order,
     263static bool _Scheduler_strong_APA_Enqueue_scheduled(
     264  Scheduler_Context *context,
     265  Scheduler_Node    *node,
     266  Priority_Control  insert_priority
     267)
     268{
     269  return _Scheduler_SMP_Enqueue_scheduled(
     270    context,
     271    node,
     272    insert_priority,
     273    _Scheduler_SMP_Priority_less_equal,
    310274    _Scheduler_strong_APA_Extract_from_ready,
    311275    _Scheduler_strong_APA_Get_highest_ready,
    312     insert_ready,
    313     insert_scheduled,
     276    _Scheduler_strong_APA_Insert_ready,
     277    _Scheduler_SMP_Insert_scheduled,
    314278    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
    315279    _Scheduler_SMP_Allocate_processor_exact
     
    317281}
    318282
    319 static bool _Scheduler_strong_APA_Enqueue_scheduled_lifo(
    320   Scheduler_Context *context,
    321   Scheduler_Node    *node
    322 )
    323 {
    324   return _Scheduler_strong_APA_Enqueue_scheduled_ordered(
    325     context,
    326     node,
    327     _Scheduler_SMP_Insert_priority_lifo_order,
    328     _Scheduler_strong_APA_Insert_ready_lifo,
    329     _Scheduler_SMP_Insert_scheduled_lifo
    330   );
    331 }
    332 
    333 static bool _Scheduler_strong_APA_Enqueue_scheduled_fifo(
    334   Scheduler_Context *context,
    335   Scheduler_Node    *node
    336 )
    337 {
    338   return _Scheduler_strong_APA_Enqueue_scheduled_ordered(
    339     context,
    340     node,
    341     _Scheduler_SMP_Insert_priority_fifo_order,
    342     _Scheduler_strong_APA_Insert_ready_fifo,
    343     _Scheduler_SMP_Insert_scheduled_fifo
    344   );
    345 }
    346 
    347283void _Scheduler_strong_APA_Unblock(
    348284  const Scheduler_Control *scheduler,
     
    358294    node,
    359295    _Scheduler_strong_APA_Do_update,
    360     _Scheduler_strong_APA_Enqueue_fifo
     296    _Scheduler_strong_APA_Enqueue
    361297  );
    362298}
     
    372308    the_thread,
    373309    node,
    374     _Scheduler_SMP_Insert_priority_lifo_order,
    375     _Scheduler_strong_APA_Insert_ready_lifo,
    376     _Scheduler_SMP_Insert_scheduled_lifo,
     310    _Scheduler_SMP_Priority_less_equal,
     311    _Scheduler_strong_APA_Insert_ready,
     312    _Scheduler_SMP_Insert_scheduled,
    377313    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
    378314    _Scheduler_SMP_Get_lowest_scheduled,
     
    395331    _Scheduler_strong_APA_Extract_from_ready,
    396332    _Scheduler_strong_APA_Do_update,
    397     _Scheduler_strong_APA_Enqueue_fifo,
    398     _Scheduler_strong_APA_Enqueue_lifo,
    399     _Scheduler_strong_APA_Enqueue_scheduled_fifo,
    400     _Scheduler_strong_APA_Enqueue_scheduled_lifo,
     333    _Scheduler_strong_APA_Enqueue,
     334    _Scheduler_strong_APA_Enqueue_scheduled,
    401335    _Scheduler_strong_APA_Do_ask_for_help
    402336  );
     
    462396    idle,
    463397    _Scheduler_strong_APA_Has_ready,
    464     _Scheduler_strong_APA_Enqueue_scheduled_fifo,
     398    _Scheduler_strong_APA_Enqueue_scheduled,
    465399    _Scheduler_SMP_Do_nothing_register_idle
    466400  );
     
    478412    cpu,
    479413    _Scheduler_strong_APA_Extract_from_ready,
    480     _Scheduler_strong_APA_Enqueue_fifo
     414    _Scheduler_strong_APA_Enqueue
    481415  );
    482416}
     
    495429    node,
    496430    _Scheduler_strong_APA_Extract_from_ready,
    497     _Scheduler_strong_APA_Enqueue_fifo,
    498     _Scheduler_strong_APA_Enqueue_scheduled_fifo
    499   );
    500 }
     431    _Scheduler_strong_APA_Enqueue,
     432    _Scheduler_strong_APA_Enqueue_scheduled
     433  );
     434}
  • testsuites/sptests/spintrcritical23/init.c

    r5018894e rc597fb1  
    11/*
    2  * Copyright (c) 2015, 2016 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2015, 2017 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    4444  test_context *ctx = &ctx_instance;
    4545  rtems_interrupt_lock_context lock_context;
     46  unsigned int next_priority;
    4647
    4748  rtems_interrupt_lock_acquire(&ctx->lock, &lock_context);
    48   if (
    49     ctx->scheduler_node->Ready_queue.current_priority
    50       != ctx->scheduler_node->Base.Priority.value
    51   ) {
     49
     50  next_priority = SCHEDULER_PRIORITY_UNMAP(
     51    (unsigned int) ctx->scheduler_node->Base.Priority.value
     52  );
     53
     54  if ( ctx->scheduler_node->Ready_queue.current_priority != next_priority ) {
    5255    rtems_task_priority priority_interrupt;
    5356    rtems_task_priority priority_task;
     
    8588
    8689  rtems_interrupt_lock_acquire(&ctx->lock, &lock_context);
     90
    8791  priority_last = ctx->priority_task;
    8892  priority_task = 1 + (priority_last + 1) % 3;
     
    9094  ctx->priority_task = priority_task;
    9195  ctx->priority_interrupt = priority_interrupt;
     96
    9297  rtems_interrupt_lock_release(&ctx->lock, &lock_context);
    9398