Changeset 3a27248 in rtems

Timestamp: Oct 21, 2016, 7:23:58 AM
Author: Sebastian Huber <sebastian.huber@…>
Branches: master
Children: 73a193f
Parents: 913864c
git-author: Sebastian Huber <sebastian.huber@…> (10/21/16 07:23:58)
git-committer: Sebastian Huber <sebastian.huber@…> (11/02/16 09:05:43)
Message:

score: First part of new MrsP implementation

Update #2556.

Files: 4 added, 16 edited

  • cpukit/libmisc/monitor/mon-sema.c

    r913864c r3a27248  
    8585      case SEMAPHORE_VARIANT_MRSP:
    8686        canonical_sema->cur_count =
    87           rtems_sema->Core_control.MRSP.Resource.owner == NULL;
     87          _MRSP_Get_owner( &rtems_sema->Core_control.MRSP ) == NULL;
    8888        canonical_sema->max_count = 1;
    8989        break;
  • cpukit/sapi/src/interrtext.c

    r913864c r3a27248  
    5656  "INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL",
    5757  "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL",
    58   "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK"
     58  "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK",
     59  "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE"
    5960};
    6061
  • cpukit/score/include/rtems/score/interr.h

    r913864c r3a27248  
    165165  INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL,
    166166  INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL,
    167   INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     167  INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK,
     168  INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    168169} Internal_errors_Core_list;
    169170
  • cpukit/score/include/rtems/score/mrsp.h

    r913864c r3a27248  
    2020#if defined(RTEMS_SMP)
    2121
    22 #include <rtems/score/chain.h>
    23 #include <rtems/score/scheduler.h>
    24 #include <rtems/score/thread.h>
    2522#include <rtems/score/threadq.h>
    2623
     
    5249 */
    5350
    54 typedef struct MRSP_Control MRSP_Control;
    55 
    5651/**
    57  * @brief MrsP rival.
    58  *
    59  * The rivals are used by threads waiting for resource ownership.  They are
    60  * registered in the MrsP control block.
     52 * @brief MrsP control block.
    6153 */
    6254typedef struct {
    6355  /**
    64    * @brief The node for registration in the MrsP rival chain.
    65    *
    66    * The chain operations are protected by the MrsP control lock.
    67    *
    68    * @see MRSP_Control::Rivals.
    69    */
    70   Chain_Node Node;
    71 
    72   /**
    73    * @brief The corresponding MrsP control block.
    74    */
    75   MRSP_Control *resource;
    76 
    77   /**
    78    * @brief Identification of the rival thread.
    79    */
    80   Thread_Control *thread;
    81 
    82   /**
    83    * @brief The ceiling priority used by the rival thread.
    84    */
    85   Priority_Node Ceiling_priority;
    86 
    87   /**
    88    * @brief The initial help state of the thread at the begin of the resource
    89    * obtain sequence.
    90    *
    91    * Used to restore this state after a timeout.
    92    */
    93   Scheduler_Help_state initial_help_state;
    94 
    95   /**
    96    * @brief The rival status.
    97    *
    98    * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP.  The rival will
    99    * busy wait until a status change happens.  This can be STATUS_SUCCESSFUL or
    100    * STATUS_TIMEOUT.  State changes are protected by the MrsP control lock.
    101    */
    102   volatile int status;
    103 
    104   /**
    105    * @brief Watchdog for timeouts.
    106    */
    107   Watchdog_Control Watchdog;
    108 } MRSP_Rival;
    109 
    110 /**
    111  * @brief MrsP control block.
    112  */
    113 struct MRSP_Control {
    114   /**
    115    * @brief Lock to protect the resource dependency tree.
    116    *
    117    * This is a thread queue since this simplifies the Classic semaphore
    118    * implementation.  Only the lock part of the thread queue is used.
     56   * @brief The thread queue to manage ownership and waiting threads.
    11957   */
    12058  Thread_queue_Control Wait_queue;
    121 
    122   /**
    123    * @brief Basic resource control.
    124    */
    125   Resource_Control Resource;
    126 
    127   /**
    128    * @brief A chain of MrsP rivals waiting for resource ownership.
    129    *
    130    * @see MRSP_Rival::Node.
    131    */
    132   Chain_Control Rivals;
    13359
    13460  /**
     
    14167   */
    14268  Priority_Control *ceiling_priorities;
    143 };
     69} MRSP_Control;
    14470
    14571/** @} */
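
With this change the MrsP control block reduces to the embedded thread queue, the ceiling priority information and the per-scheduler ceiling priorities; ownership is read from the thread queue instead of a separate Resource_Control. A minimal sketch of that relationship, assuming only the _MRSP_Get_owner() helper added in mrspimpl.h below (the example function is hypothetical):

    #include <rtems/score/mrspimpl.h>

    /*
     * Hedged sketch, not part of the changeset: with the Resource_Control and
     * rival chain gone, "is this MrsP semaphore free?" becomes a question about
     * the thread queue owner, which is exactly the test the monitor code in
     * mon-sema.c now performs.
     */
    static bool example_mrsp_is_free( MRSP_Control *mrsp )
    {
      return _MRSP_Get_owner( mrsp ) == NULL;
    }
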
  • cpukit/score/include/rtems/score/mrspimpl.h

    r913864c r3a27248  
    2121
    2222#include <rtems/score/assert.h>
    23 #include <rtems/score/chainimpl.h>
    24 #include <rtems/score/resourceimpl.h>
    25 #include <rtems/score/schedulerimpl.h>
    2623#include <rtems/score/status.h>
    2724#include <rtems/score/threadqimpl.h>
     
    3936 */
    4037
    41 /**
    42  * @brief Internal state used for MRSP_Rival::status to indicate that this
    43  * rival waits for resource ownership.
    44  */
    45 #define MRSP_WAIT_FOR_OWNERSHIP STATUS_MINUS_ONE
    46 
    47 /*
    48  * FIXME: Operations with the resource dependency tree are protected by the
    49  * global scheduler lock.  Since the scheduler lock should be scheduler
    50  * instance specific in the future this will only work temporarily.  A more
    51  * sophisticated locking strategy is necessary.
    52  */
    53 
    54 RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context )
    55 {
    56   /* FIXME: MrsP protocol implementation will be reworked soon */
    57 }
    58 
    59 RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
    60 {
    61   /* FIXME: MrsP protocol implementation will be reworked soon */
    62 }
     38#define MRSP_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
    6339
    6440RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
     
    7652{
    7753  _Thread_queue_Release( &mrsp->Wait_queue, queue_context );
     54}
     55
     56RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner( MRSP_Control *mrsp )
     57{
     58  return mrsp->Wait_queue.Queue.owner;
     59}
     60
     61RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
     62  MRSP_Control   *mrsp,
     63  Thread_Control *owner
     64)
     65{
     66  mrsp->Wait_queue.Queue.owner = owner;
    7867}
    7968
     
    150139  MRSP_Control   *mrsp,
    151140  Thread_Control *thread,
    152   MRSP_Rival     *rival
     141  Priority_Node  *ceiling_priority
    153142)
    154143{
    155144  ISR_lock_Context lock_context;
    156145
    157   _Thread_Wait_acquire_default_critical( thread, &lock_context );
     146  _Thread_Wait_acquire_default( thread, &lock_context );
    158147  _Thread_Priority_replace(
    159148    thread,
    160     &rival->Ceiling_priority,
     149    ceiling_priority,
    161150    &mrsp->Ceiling_priority
    162151  );
    163   _Thread_Wait_release_default_critical( thread, &lock_context );
     152  _Thread_Wait_release_default( thread, &lock_context );
    164153}
    165154
    166155RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
    167156  MRSP_Control         *mrsp,
    168   Thread_Control       *new_owner,
     157  Thread_Control       *executing,
    169158  Thread_queue_Context *queue_context
    170159)
     
    175164  status = _MRSP_Raise_priority(
    176165    mrsp,
    177     new_owner,
     166    executing,
    178167    &mrsp->Ceiling_priority,
    179168    queue_context
     
    185174  }
    186175
    187   _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
    188   _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
    189   _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
    190 
     176  _MRSP_Set_owner( mrsp, executing );
    191177  cpu_self = _Thread_Dispatch_disable_critical(
    192178    &queue_context->Lock_context.Lock_context
    193179  );
    194180  _MRSP_Release( mrsp, queue_context );
    195 
    196   _Thread_Priority_update( queue_context );
    197 
     181  _Thread_Priority_and_sticky_update( executing, 1 );
    198182  _Thread_Dispatch_enable( cpu_self );
    199183  return STATUS_SUCCESSFUL;
     
    235219  }
    236220
    237   _Resource_Initialize( &mrsp->Resource );
    238   _Chain_Initialize_empty( &mrsp->Rivals );
    239221  _Thread_queue_Initialize( &mrsp->Wait_queue );
    240 
    241222  return STATUS_SUCCESSFUL;
    242223}
    243224
    244 RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
    245 {
    246   MRSP_Rival           *rival;
    247   MRSP_Control         *mrsp;
    248   Thread_Control       *thread;
    249   Thread_queue_Context  queue_context;
    250 
    251   rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
    252   mrsp = rival->resource;
    253   thread = rival->thread;
    254 
    255   _Thread_queue_Context_initialize( &queue_context );
    256   _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
    257   _MRSP_Acquire_critical( mrsp, &queue_context );
    258 
    259   if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
    260     ISR_lock_Context giant_lock_context;
    261 
    262     _MRSP_Remove_priority( thread, &rival->Ceiling_priority, &queue_context );
    263 
    264     _MRSP_Giant_acquire( &giant_lock_context );
    265 
    266     _Chain_Extract_unprotected( &rival->Node );
    267     _Resource_Node_extract( &thread->Resource_node );
    268     _Resource_Node_set_dependency( &thread->Resource_node, NULL );
    269     _Scheduler_Thread_change_help_state( thread, rival->initial_help_state );
    270     _Scheduler_Thread_change_resource_root( thread, thread );
    271 
    272     _MRSP_Giant_release( &giant_lock_context );
    273 
    274     rival->status = STATUS_TIMEOUT;
    275 
    276     _MRSP_Release( mrsp, &queue_context );
    277 
    278     _Thread_Priority_update( &queue_context );
    279   } else {
    280     _MRSP_Release( mrsp, &queue_context );
    281   }
    282 }
    283 
    284225RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
    285226  MRSP_Control         *mrsp,
    286   Resource_Node        *owner,
    287227  Thread_Control       *executing,
    288228  Thread_queue_Context *queue_context
    289229)
    290230{
    291   Status_Control     status;
    292   MRSP_Rival         rival;
    293   Thread_Life_state  life_state;
    294   Per_CPU_Control   *cpu_self;
    295   ISR_lock_Context   giant_lock_context;
    296   ISR_Level          level;
    297   Watchdog_Interval  timeout;
    298 
    299   _Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );
     231  Status_Control status;
     232  Priority_Node  ceiling_priority;
    300233
    301234  status = _MRSP_Raise_priority(
    302235    mrsp,
    303236    executing,
    304     &rival.Ceiling_priority,
     237    &ceiling_priority,
    305238    queue_context
    306239  );
     
    311244  }
    312245
    313   rival.thread = executing;
    314   rival.resource = mrsp;
    315   _Chain_Initialize_node( &rival.Node );
    316 
    317   _MRSP_Giant_acquire( &giant_lock_context );
    318 
    319   rival.initial_help_state =
    320     _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
    321   rival.status = MRSP_WAIT_FOR_OWNERSHIP;
    322 
    323   _Chain_Initialize_node( &rival.Node );
    324   _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
    325   _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
    326   _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
    327   _Scheduler_Thread_change_resource_root(
     246  _Thread_queue_Context_set_deadlock_callout(
     247    queue_context,
     248    _Thread_queue_Deadlock_status
     249  );
     250  status = _Thread_queue_Enqueue_sticky(
     251    &mrsp->Wait_queue.Queue,
     252    MRSP_TQ_OPERATIONS,
    328253    executing,
    329     THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) )
    330   );
    331 
    332   _MRSP_Giant_release( &giant_lock_context );
    333 
    334   cpu_self = _Thread_Dispatch_disable_critical(
    335     &queue_context->Lock_context.Lock_context
    336   );
    337   _MRSP_Release( mrsp, queue_context );
    338 
    339   _Thread_Priority_update( queue_context );
    340 
    341   timeout = (Watchdog_Interval) queue_context->timeout;
    342 
    343   if ( timeout > 0 ) {
    344     _Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
    345     _Watchdog_Initialize( &rival.Watchdog, _MRSP_Timeout );
    346     _ISR_Local_disable( level );
    347     _Watchdog_Per_CPU_insert_relative( &rival.Watchdog, cpu_self, timeout );
    348     _ISR_Local_enable( level );
    349   }
    350 
    351   life_state = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
    352   _Thread_Dispatch_enable( cpu_self );
    353 
    354   _Assert( _Debug_Is_thread_dispatching_allowed() );
    355 
    356   /* Wait for state change */
    357   do {
    358     status = rival.status;
    359   } while ( status == MRSP_WAIT_FOR_OWNERSHIP );
    360 
    361   _Thread_Set_life_protection( life_state );
    362 
    363   if ( timeout > 0 ) {
    364     _ISR_Local_disable( level );
    365     _Watchdog_Per_CPU_remove(
    366       &rival.Watchdog,
    367       cpu_self,
    368       &cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
     254    queue_context
     255  );
     256
     257  if ( status == STATUS_SUCCESSFUL ) {
     258    _MRSP_Replace_priority( mrsp, executing, &ceiling_priority );
     259  } else {
     260    Thread_queue_Context  queue_context;
     261    Per_CPU_Control      *cpu_self;
     262    int                   sticky_level_change;
     263
     264    if ( status != STATUS_DEADLOCK ) {
     265      sticky_level_change = -1;
     266    } else {
     267      sticky_level_change = 0;
     268    }
     269
     270    _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
     271    _MRSP_Remove_priority( executing, &ceiling_priority, &queue_context );
     272    cpu_self = _Thread_Dispatch_disable_critical(
     273      &queue_context.Lock_context.Lock_context
    369274    );
    370     _ISR_Local_enable( level );
     275    _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
     276    _Thread_Priority_and_sticky_update( executing, sticky_level_change );
     277    _Thread_Dispatch_enable( cpu_self );
    371278  }
    372279
     
    382289{
    383290  Status_Control  status;
    384   Resource_Node *owner;
     291  Thread_Control *owner;
    385292
    386293  _MRSP_Acquire_critical( mrsp, queue_context );
    387294
    388   owner = _Resource_Get_owner( &mrsp->Resource );
     295  owner = _MRSP_Get_owner( mrsp );
    389296
    390297  if ( owner == NULL ) {
    391298    status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
    392   } else if (
    393     wait
    394       && _Resource_Node_get_root( owner ) != &executing->Resource_node
    395   ) {
    396     status = _MRSP_Wait_for_ownership( mrsp, owner, executing, queue_context );
     299  } else if ( owner == executing ) {
     300    _MRSP_Release( mrsp, queue_context );
     301    status = STATUS_UNAVAILABLE;
     302  } else if ( wait ) {
     303    status = _MRSP_Wait_for_ownership( mrsp, executing, queue_context );
    397304  } else {
    398305    _MRSP_Release( mrsp, queue_context );
    399     /* Not available, nested access or deadlock */
    400306    status = STATUS_UNAVAILABLE;
    401307  }
     
    410316)
    411317{
    412   ISR_lock_Context  giant_lock_context;
    413   Per_CPU_Control  *cpu_self;
    414 
    415   if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
     318  Thread_queue_Heads *heads;
     319
     320  if ( _MRSP_Get_owner( mrsp ) != executing ) {
    416321    _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
    417322    return STATUS_NOT_OWNER;
    418323  }
    419324
    420   if (
    421     !_Resource_Is_most_recently_obtained(
    422       &mrsp->Resource,
    423       &executing->Resource_node
    424     )
    425   ) {
    426     _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
    427     return STATUS_RELEASE_ORDER_VIOLATION;
    428   }
    429 
    430325  _MRSP_Acquire_critical( mrsp, queue_context );
     326
     327  _MRSP_Set_owner( mrsp, NULL );
    431328  _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
    432   _MRSP_Giant_acquire( &giant_lock_context );
    433 
    434   _Resource_Extract( &mrsp->Resource );
    435 
    436   if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
    437     _Resource_Set_owner( &mrsp->Resource, NULL );
    438   } else {
    439     MRSP_Rival     *rival;
    440     Thread_Control *new_owner;
    441 
    442     rival = (MRSP_Rival *) _Chain_Get_first_unprotected( &mrsp->Rivals );
    443 
    444     /*
    445      * This must be inside the critical section since the status prevents a
    446      * potential double extraction in _MRSP_Timeout().
    447      */
    448     rival->status = STATUS_SUCCESSFUL;
    449 
    450     new_owner = rival->thread;
    451 
    452     _MRSP_Replace_priority( mrsp, new_owner, rival );
    453 
    454     _Resource_Node_extract( &new_owner->Resource_node );
    455     _Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
    456     _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
    457     _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
    458     _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
    459     _Scheduler_Thread_change_resource_root( new_owner, new_owner );
    460   }
    461 
    462   if ( !_Resource_Node_owns_resources( &executing->Resource_node ) ) {
    463     _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF );
    464   }
    465 
    466   _MRSP_Giant_release( &giant_lock_context );
    467 
    468   cpu_self = _Thread_Dispatch_disable_critical(
    469     &queue_context->Lock_context.Lock_context
    470   );
    471   _MRSP_Release( mrsp, queue_context );
    472 
    473   _Thread_Priority_update( queue_context );
    474 
    475   _Thread_Dispatch_enable( cpu_self );
    476 
     329
     330  heads = mrsp->Wait_queue.Queue.heads;
     331
     332  if ( heads == NULL ) {
     333    Per_CPU_Control *cpu_self;
     334
     335    cpu_self = _Thread_Dispatch_disable_critical(
     336      &queue_context->Lock_context.Lock_context
     337    );
     338    _MRSP_Release( mrsp, queue_context );
     339    _Thread_Priority_and_sticky_update( executing, -1 );
     340    _Thread_Dispatch_enable( cpu_self );
     341    return STATUS_SUCCESSFUL;
     342  }
     343
     344  _Thread_queue_Surrender_sticky(
     345    &mrsp->Wait_queue.Queue,
     346    heads,
     347    executing,
     348    queue_context,
     349    MRSP_TQ_OPERATIONS
     350  );
    477351  return STATUS_SUCCESSFUL;
    478352}
     
    480354RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
    481355{
    482   if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
     356  if ( _MRSP_Get_owner( mrsp ) != NULL ) {
    483357    return STATUS_RESOURCE_IN_USE;
    484358  }
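
A consequence of the rework is visible in the removed _MRSP_Timeout() and watchdog code above: a timed obtain no longer needs a private watchdog, because _Thread_queue_Enqueue_sticky() arms the thread timer from the timeout information in the queue context. A hedged sketch of a timed seize under that assumption (the wrapper name and the direct field assignments are illustrative; the real Classic semaphore path prepares the queue context during object lookup):

    #include <rtems/score/mrspimpl.h>

    /* Hypothetical wrapper around the _MRSP_Seize() shown above. */
    static Status_Control example_mrsp_timed_seize(
      MRSP_Control      *mrsp,
      Thread_Control    *executing,
      Watchdog_Interval  ticks
    )
    {
      Thread_queue_Context queue_context;

      _Thread_queue_Context_initialize( &queue_context );
      /* Describe the timeout; the sticky enqueue starts the timer itself. */
      queue_context.timeout_discipline = WATCHDOG_RELATIVE;
      queue_context.timeout = ticks;
      _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );

      return _MRSP_Seize( mrsp, executing, true, &queue_context );
    }
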
  • cpukit/score/include/rtems/score/schedulerimpl.h

    r913864c r3a27248  
    553553#endif
    554554}
     555
     556#if defined(RTEMS_SMP)
     557/**
     558 * @brief Changes the sticky level of the home scheduler node and propagates a
     559 * priority change of a thread to the scheduler.
     560 *
     561 * @param[in] the_thread The thread changing its priority or sticky level.
     562 *
     563 * @see _Scheduler_Update_priority().
     564 */
     565RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
     566  Thread_Control *the_thread,
     567  int             sticky_level_change
     568)
     569{
     570  Chain_Node              *node;
     571  const Chain_Node        *tail;
     572  Scheduler_Node          *scheduler_node;
     573  const Scheduler_Control *scheduler;
     574  ISR_lock_Context         lock_context;
     575
     576  _Thread_Scheduler_process_requests( the_thread );
     577
     578  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
     579  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
     580  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
     581
     582  _Scheduler_Acquire_critical( scheduler, &lock_context );
     583
     584  ( *scheduler->Operations.update_priority )(
     585    scheduler,
     586    the_thread,
     587    scheduler_node
     588  );
     589
     590  _Scheduler_Release_critical( scheduler, &lock_context );
     591
     592  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
     593  node = _Chain_Next( node );
     594
     595  while ( node != tail ) {
     596    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
     597    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
     598
     599    _Scheduler_Acquire_critical( scheduler, &lock_context );
     600    ( *scheduler->Operations.update_priority )(
     601      scheduler,
     602      the_thread,
     603      scheduler_node
     604    );
     605    _Scheduler_Release_critical( scheduler, &lock_context );
     606
     607    node = _Chain_Next( node );
     608  }
     609}
     610#endif
    555611
    556612/**
  • cpukit/score/include/rtems/score/status.h

    r913864c r3a27248  
    114114  STATUS_OBJECT_WAS_DELETED =
    115115    STATUS_BUILD( STATUS_CLASSIC_OBJECT_WAS_DELETED, EINVAL ),
    116   STATUS_RELEASE_ORDER_VIOLATION =
    117     STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EPERM ),
    118116  STATUS_RESOURCE_IN_USE =
    119117    STATUS_BUILD( STATUS_CLASSIC_RESOURCE_IN_USE, EBUSY ),
  • cpukit/score/include/rtems/score/threadimpl.h

    r913864c r3a27248  
    17131713}
    17141714
     1715RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
     1716  const Thread_Control *the_thread
     1717)
     1718{
     1719#if defined(RTEMS_SMP)
     1720  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
     1721#else
     1722  return the_thread->Wait.flags;
     1723#endif
     1724}
     1725
    17151726/**
    17161727 * @brief Tries to change the thread wait flags with release semantics in case
  • cpukit/score/include/rtems/score/threadqimpl.h

    r913864c r3a27248  
    2525#include <rtems/score/scheduler.h>
    2626#include <rtems/score/smp.h>
     27#include <rtems/score/status.h>
    2728#include <rtems/score/thread.h>
    2829
     
    554555);
    555556
     557#if defined(RTEMS_SMP)
     558/**
     559 * @brief Enqueues the thread on the thread queue and busy waits for dequeue.
     560 *
     561 * Optionally starts the thread timer in case the timeout discipline is not
     562 * WATCHDOG_NO_TIMEOUT. Timeout discipline and value are in the queue_context.
     563 *
     564 * The caller must be the owner of the thread queue lock.  This function will
     565 * release the thread queue lock and register it as the new thread lock.
     566 *
      567 * The thread priorities of the owner and the thread are updated with respect to the
     568 * scheduler.  The sticky level of the thread is incremented.  A thread
     569 * dispatch is performed if necessary.
     570 *
     571 * Afterwards, the thread busy waits on the thread wait flags until a timeout
     572 * occurs or the thread queue is surrendered to this thread.  So, it sticks to
     573 * the processor instead of blocking with respect to the scheduler.
     574 *
     575 * @param[in] queue The actual thread queue.
     576 * @param[in] operations The thread queue operations.
     577 * @param[in] the_thread The thread to enqueue.
     578 * @param[in] queue_context The thread queue context of the lock acquire.
     579 */
     580Status_Control _Thread_queue_Enqueue_sticky(
     581  Thread_queue_Queue            *queue,
     582  const Thread_queue_Operations *operations,
     583  Thread_Control                *the_thread,
     584  Thread_queue_Context          *queue_context
     585);
     586#endif
     587
    556588/**
    557589 * @brief Acquires the thread queue lock and calls
     
    733765  const Thread_queue_Operations *operations
    734766);
     767
     768#if defined(RTEMS_SMP)
     769/**
     770 * @brief Surrenders the thread queue previously owned by the thread to the
     771 * first enqueued thread.
     772 *
     773 * The owner of the thread queue must be set to NULL by the caller.
     774 *
     775 * The caller must be the owner of the thread queue lock.  This function will
      776 * release the thread queue lock.
     777 *
     778 * The thread priorities of the previous owner and the new owner are updated.  The
     779 * sticky level of the previous owner is decremented.  A thread dispatch is
     780 * performed if necessary.
     781 *
     782 * @param[in] queue The actual thread queue.
     783 * @param[in] heads The thread queue heads.  It must not be NULL.
     784 * @param[in] previous_owner The previous owner thread surrendering the thread
     785 *   queue.
     786 * @param[in] queue_context The thread queue context of the lock acquire.
     787 * @param[in] operations The thread queue operations.
     788 */
     789void _Thread_queue_Surrender_sticky(
     790  Thread_queue_Queue            *queue,
     791  Thread_queue_Heads            *heads,
     792  Thread_Control                *previous_owner,
     793  Thread_queue_Context          *queue_context,
     794  const Thread_queue_Operations *operations
     795);
     796#endif
    735797
    736798RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
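
The comments above spell out the contract MrsP now relies on: enqueue with the thread queue lock held and a deadlock callout installed, and surrender only after the owner has been cleared. A hedged caller sketch of the enqueue side, mirroring _MRSP_Wait_for_ownership() in mrspimpl.h (the wrapper name is hypothetical; the queue lock is assumed to be held on entry, as required):

    #include <rtems/score/threadqimpl.h>

    /* Hypothetical wrapper: the caller already owns the lock of wait_queue. */
    static Status_Control example_enqueue_sticky(
      Thread_queue_Control *wait_queue,
      Thread_Control       *executing,
      Thread_queue_Context *queue_context
    )
    {
      _Thread_queue_Context_set_deadlock_callout(
        queue_context,
        _Thread_queue_Deadlock_status
      );

      /*
       * Updates the priorities, increments the sticky level, starts the timer
       * and busy waits until the queue is surrendered or a timeout occurs.
       */
      return _Thread_queue_Enqueue_sticky(
        &wait_queue->Queue,
        &_Thread_queue_Operations_priority_inherit,
        executing,
        queue_context
      );
    }
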
  • cpukit/score/src/threadchangepriority.c

    r913864c r3a27248  
    354354  }
    355355}
     356
     357#if defined(RTEMS_SMP)
     358void _Thread_Priority_and_sticky_update(
     359  Thread_Control *the_thread,
     360  int             sticky_level_change
     361)
     362{
     363  ISR_lock_Context lock_context;
     364
     365  _Thread_State_acquire( the_thread, &lock_context );
     366  _Scheduler_Priority_and_sticky_update(
     367    the_thread,
     368    sticky_level_change
     369  );
     370  _Thread_State_release( the_thread, &lock_context );
     371}
     372#endif
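
Within this changeset the sticky level change argument follows a simple convention: +1 when a thread claims an MrsP semaphore or enqueues sticky, -1 when it gives the semaphore up or its wait ends in a timeout, and 0 when only the priority has to be propagated. A small hypothetical helper summarizing the call sites added above and in mrspimpl.h:

    #include <stdbool.h>

    /*
     * Illustrative only: the deltas passed to
     * _Thread_Priority_and_sticky_update() by the code in this changeset.
     */
    static int example_sticky_delta( bool acquires, bool releases )
    {
      if ( acquires ) {
        /* _MRSP_Claim_ownership(), _Thread_queue_Enqueue_sticky() */
        return 1;
      }

      if ( releases ) {
        /* _MRSP_Surrender() without waiters, timed out wait for ownership */
        return -1;
      }

      /* New owner in _Thread_queue_Surrender_sticky(); deadlocked enqueue */
      return 0;
    }
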
  • cpukit/score/src/threadqenqueue.c

    r913864c r3a27248  
    371371}
    372372
    373 void _Thread_queue_Enqueue_critical(
    374   Thread_queue_Queue            *queue,
    375   const Thread_queue_Operations *operations,
    376   Thread_Control                *the_thread,
    377   States_Control                 state,
    378   Thread_queue_Context          *queue_context
    379 )
    380 {
    381   Per_CPU_Control *cpu_self;
    382   bool             success;
    383 
    384 #if defined(RTEMS_MULTIPROCESSING)
    385   if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    386     the_thread = _Thread_MP_Allocate_proxy( state );
    387   }
    388 #endif
    389 
    390   _Thread_Wait_claim( the_thread, queue );
    391 
    392   if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    393     _Thread_queue_Path_release_critical( queue_context );
    394     _Thread_Wait_restore_default( the_thread );
    395     _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    396     _Thread_Wait_tranquilize( the_thread );
    397     ( *queue_context->deadlock_callout )( the_thread );
    398     return;
    399   }
    400 
    401   _Thread_queue_Context_clear_priority_updates( queue_context );
    402   _Thread_Wait_claim_finalize( the_thread, operations );
    403   ( *operations->enqueue )( queue, the_thread, queue_context );
    404 
    405   _Thread_queue_Path_release_critical( queue_context );
    406 
    407   the_thread->Wait.return_code = STATUS_SUCCESSFUL;
    408   _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
    409   cpu_self = _Thread_Dispatch_disable_critical(
    410     &queue_context->Lock_context.Lock_context
    411   );
    412   _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    413 
    414   if (
    415     cpu_self->thread_dispatch_disable_level
    416       != queue_context->expected_thread_dispatch_disable_level
    417   ) {
    418     _Terminate(
    419       INTERNAL_ERROR_CORE,
    420       false,
    421       INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    422     );
    423   }
    424 
    425   /*
    426    *  Set the blocking state for this thread queue in the thread.
    427    */
    428   _Thread_Set_state( the_thread, state );
    429 
    430   /*
    431    *  If the thread wants to timeout, then schedule its timer.
    432    */
     373static void _Thread_queue_Timeout(
     374  Thread_Control       *the_thread,
     375  Per_CPU_Control      *cpu_self,
     376  Thread_queue_Context *queue_context
     377)
     378{
    433379  switch ( queue_context->timeout_discipline ) {
    434380    case WATCHDOG_RELATIVE:
     
    454400      break;
    455401  }
     402}
     403
     404void _Thread_queue_Enqueue_critical(
     405  Thread_queue_Queue            *queue,
     406  const Thread_queue_Operations *operations,
     407  Thread_Control                *the_thread,
     408  States_Control                 state,
     409  Thread_queue_Context          *queue_context
     410)
     411{
     412  Per_CPU_Control *cpu_self;
     413  bool             success;
     414
     415#if defined(RTEMS_MULTIPROCESSING)
     416  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
     417    the_thread = _Thread_MP_Allocate_proxy( state );
     418  }
     419#endif
     420
     421  _Thread_Wait_claim( the_thread, queue );
     422
     423  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
     424    _Thread_queue_Path_release_critical( queue_context );
     425    _Thread_Wait_restore_default( the_thread );
     426    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
     427    _Thread_Wait_tranquilize( the_thread );
     428    ( *queue_context->deadlock_callout )( the_thread );
     429    return;
     430  }
     431
     432  _Thread_queue_Context_clear_priority_updates( queue_context );
     433  _Thread_Wait_claim_finalize( the_thread, operations );
     434  ( *operations->enqueue )( queue, the_thread, queue_context );
     435
     436  _Thread_queue_Path_release_critical( queue_context );
     437
     438  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
     439  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
     440  cpu_self = _Thread_Dispatch_disable_critical(
     441    &queue_context->Lock_context.Lock_context
     442  );
     443  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
     444
     445  if (
     446    cpu_self->thread_dispatch_disable_level
     447      != queue_context->expected_thread_dispatch_disable_level
     448  ) {
     449    _Terminate(
     450      INTERNAL_ERROR_CORE,
     451      false,
     452      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
     453    );
     454  }
     455
     456  /*
     457   *  Set the blocking state for this thread queue in the thread.
     458   */
     459  _Thread_Set_state( the_thread, state );
     460
     461  /*
     462   *  If the thread wants to timeout, then schedule its timer.
     463   */
     464  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
    456465
    457466  /*
     
    476485  _Thread_Dispatch_enable( cpu_self );
    477486}
     487
     488#if defined(RTEMS_SMP)
     489Status_Control _Thread_queue_Enqueue_sticky(
     490  Thread_queue_Queue            *queue,
     491  const Thread_queue_Operations *operations,
     492  Thread_Control                *the_thread,
     493  Thread_queue_Context          *queue_context
     494)
     495{
     496  Per_CPU_Control *cpu_self;
     497
     498  _Thread_Wait_claim( the_thread, queue );
     499
     500  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
     501    _Thread_queue_Path_release_critical( queue_context );
     502    _Thread_Wait_restore_default( the_thread );
     503    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
     504    _Thread_Wait_tranquilize( the_thread );
     505    ( *queue_context->deadlock_callout )( the_thread );
     506    return _Thread_Wait_get_status( the_thread );
     507  }
     508
     509  _Thread_queue_Context_clear_priority_updates( queue_context );
     510  _Thread_Wait_claim_finalize( the_thread, operations );
     511  ( *operations->enqueue )( queue, the_thread, queue_context );
     512
     513  _Thread_queue_Path_release_critical( queue_context );
     514
     515  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
     516  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
     517  cpu_self = _Thread_Dispatch_disable_critical(
     518    &queue_context->Lock_context.Lock_context
     519  );
     520  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
     521
     522  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
     523    _Terminate(
     524      INTERNAL_ERROR_CORE,
     525      false,
     526      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
     527    );
     528  }
     529
     530  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
     531  _Thread_Priority_update( queue_context );
     532  _Thread_Priority_and_sticky_update( the_thread, 1 );
     533  _Thread_Dispatch_enable( cpu_self );
     534
     535  while (
     536    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
     537  ) {
     538    /* Wait */
     539  }
     540
     541  _Thread_Wait_tranquilize( the_thread );
     542  _Thread_Timer_remove( the_thread );
     543  return _Thread_Wait_get_status( the_thread );
     544}
     545#endif
    478546
    479547#if defined(RTEMS_MULTIPROCESSING)
     
    667735}
    668736
     737#if defined(RTEMS_SMP)
     738void _Thread_queue_Surrender_sticky(
     739  Thread_queue_Queue            *queue,
     740  Thread_queue_Heads            *heads,
     741  Thread_Control                *previous_owner,
     742  Thread_queue_Context          *queue_context,
     743  const Thread_queue_Operations *operations
     744)
     745{
     746  Thread_Control  *new_owner;
     747  Per_CPU_Control *cpu_self;
     748
     749  _Assert( heads != NULL );
     750
     751  _Thread_queue_Context_clear_priority_updates( queue_context );
     752  new_owner = ( *operations->surrender )(
     753    queue,
     754    heads,
     755    previous_owner,
     756    queue_context
     757  );
     758  queue->owner = new_owner;
     759  _Thread_queue_Make_ready_again( new_owner );
     760
     761  cpu_self = _Thread_Dispatch_disable_critical(
     762    &queue_context->Lock_context.Lock_context
     763  );
     764  _Thread_queue_Queue_release(
     765    queue,
     766    &queue_context->Lock_context.Lock_context
     767  );
     768  _Thread_Priority_and_sticky_update( previous_owner, -1 );
     769  _Thread_Priority_and_sticky_update( new_owner, 0 );
     770  _Thread_Dispatch_enable( cpu_self );
     771}
     772#endif
     773
    669774Thread_Control *_Thread_queue_Do_dequeue(
    670775  Thread_queue_Control          *the_thread_queue,
  • testsuites/smptests/Makefile.am

    r913864c r3a27248  
    1818SUBDIRS += smpfatal01
    1919SUBDIRS += smpfatal02
     20SUBDIRS += smpfatal03
    2021SUBDIRS += smpfatal04
    2122SUBDIRS += smpfatal05
  • testsuites/smptests/configure.ac

    r913864c r3a27248  
    7676smpfatal01/Makefile
    7777smpfatal02/Makefile
     78smpfatal03/Makefile
    7879smpfatal04/Makefile
    7980smpfatal05/Makefile
  • testsuites/smptests/smpmrsp01/init.c

    r913864c r3a27248  
    215215}
    216216
     217static void create_timer(test_context *ctx)
     218{
     219  rtems_status_code sc;
     220
     221  sc = rtems_timer_create(
     222    rtems_build_name('T', 'I', 'M', 'R'),
     223    &ctx->timer_id
     224  );
     225  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     226}
     227
     228static void delete_timer(test_context *ctx)
     229{
     230  rtems_status_code sc;
     231
     232  sc = rtems_timer_delete(ctx->timer_id);
     233  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     234}
     235
     236static void fire_timer(
     237  test_context *ctx,
     238  rtems_interval interval,
     239  rtems_timer_service_routine_entry routine
     240)
     241{
     242  rtems_status_code sc;
     243
     244  sc = rtems_timer_fire_after(ctx->timer_id, interval, routine, ctx);
     245  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     246}
     247
    217248static void create_mrsp_sema(
    218249  test_context *ctx,
     
    745776}
    746777
    747 static void test_mrsp_unlock_order_error(test_context *ctx)
    748 {
    749   rtems_status_code sc;
    750   rtems_id id_a;
    751   rtems_id id_b;
    752 
    753   puts("test MrsP unlock order error");
    754 
    755   create_mrsp_sema(ctx, &id_a, 1);
    756   create_mrsp_sema(ctx, &id_b, 1);
    757 
    758   sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    759   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    760 
    761   sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    762   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    763 
    764   sc = rtems_semaphore_release(id_a);
    765   rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
    766 
    767   sc = rtems_semaphore_release(id_b);
    768   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    769 
    770   sc = rtems_semaphore_release(id_a);
    771   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    772 
    773   sc = rtems_semaphore_delete(id_a);
    774   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    775 
    776   sc = rtems_semaphore_delete(id_b);
    777   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     778static void deadlock_timer(rtems_id timer_id, void *arg)
     779{
     780  test_context *ctx = arg;
     781
     782  change_prio(ctx->main_task_id, 1);
    778783}
    779784
     
    785790  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    786791  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     792
     793  fire_timer(ctx, 2, deadlock_timer);
    787794
    788795  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     
    811818  change_prio(RTEMS_SELF, prio);
    812819
     820  create_timer(ctx);
    813821  create_mrsp_sema(ctx, &ctx->mrsp_ids[0], prio);
    814822  create_mrsp_sema(ctx, &ctx->mrsp_ids[1], prio);
     
    833841  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    834842
     843  prio = 1;
     844  sc = rtems_semaphore_set_priority(
     845    ctx->mrsp_ids[1],
     846    ctx->scheduler_ids[0],
     847    prio,
     848    &prio
     849  );
     850  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     851  rtems_test_assert(prio == 2);
     852
    835853  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    836   rtems_test_assert(sc == RTEMS_UNSATISFIED);
     854  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
     855
     856  sc = rtems_semaphore_set_priority(
     857    ctx->mrsp_ids[1],
     858    ctx->scheduler_ids[0],
     859    prio,
     860    &prio
     861  );
     862  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    837863
    838864  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
     
    850876  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
    851877  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     878
     879  delete_timer(ctx);
    852880}
    853881
     
    10071035  assert_prio(RTEMS_SELF, 3);
    10081036
    1009   sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
    1010   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     1037  fire_timer(ctx, 2, unblock_ready_timer);
    10111038
    10121039  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     
    11041131   */
    11051132
    1106   sc = rtems_timer_fire_after(
    1107     ctx->timer_id,
    1108     2,
    1109     unblock_owner_before_rival_timer,
    1110     ctx
    1111   );
    1112   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     1133  fire_timer(ctx, 2, unblock_owner_before_rival_timer);
    11131134
    11141135  /* This will take the processor away from us, the timer will help later */
     
    11241145  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    11251146
    1126   sc = rtems_timer_fire_after(
    1127     ctx->timer_id,
    1128     2,
    1129     unblock_owner_after_rival_timer,
    1130     ctx
    1131   );
    1132   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     1147  fire_timer(ctx, 2, unblock_owner_after_rival_timer);
    11331148
    11341149  /* This will take the processor away from us, the timer will help later */
     
    12301245  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    12311246
    1232   sc = rtems_timer_create(
    1233     rtems_build_name('T', 'I', 'M', 'R'),
    1234     &ctx->timer_id
    1235   );
    1236   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     1247  create_timer(ctx);
    12371248
    12381249  /* In case these tasks run, then we have a MrsP protocol violation */
     
    12471258
    12481259  print_switch_events(ctx);
    1249 
    1250   sc = rtems_timer_delete(ctx->timer_id);
    1251   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     1260  delete_timer(ctx);
    12521261
    12531262  sc = rtems_task_delete(ctx->high_task_id[0]);
     
    17501759  test_mrsp_initially_locked_error();
    17511760  test_mrsp_nested_obtain_error(ctx);
    1752   test_mrsp_unlock_order_error(ctx);
    17531761  test_mrsp_deadlock_error(ctx);
    17541762  test_mrsp_multiple_obtain(ctx);
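
For context, the semaphores in this test are Classic binary semaphores created with the multiprocessor resource sharing attribute, and the changed assertion above reflects the new deadlock detection: an obtain that would deadlock now returns RTEMS_INCORRECT_STATE where the old implementation returned RTEMS_UNSATISFIED. A hedged sketch of such a create call, patterned after the test's create_mrsp_sema() helper (the exact attribute set is an assumption):

    #include <rtems.h>
    #include <assert.h>

    /* Hypothetical stand-alone variant of the test's create_mrsp_sema(). */
    static rtems_id example_create_mrsp_sema( rtems_task_priority ceiling )
    {
      rtems_status_code sc;
      rtems_id          id;

      sc = rtems_semaphore_create(
        rtems_build_name( 'M', 'R', 'S', 'P' ),
        1,
        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING | RTEMS_BINARY_SEMAPHORE,
        ceiling,
        &id
      );
      assert( sc == RTEMS_SUCCESSFUL );

      return id;
    }
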
  • testsuites/sptests/spinternalerror02/init.c

    r913864c r3a27248  
    3737
    3838  rtems_test_assert(
    39     error - 3 == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     39    error - 3 == INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    4040  );
    4141}
  • testsuites/sptests/spinternalerror02/spinternalerror02.scn

    r913864c r3a27248  
    2929INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
    3030INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     31INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    3132?
    3233?