Changeset 7097962 in rtems


Ignore:
Timestamp:
Aug 29, 2018, 7:43:44 AM (8 months ago)
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
master
Children:
e58f1cd3
Parents:
d8bc0730
git-author:
Sebastian Huber <sebastian.huber@…> (08/29/18 07:43:44)
git-committer:
Sebastian Huber <sebastian.huber@…> (09/10/18 08:38:45)
Message:

score: Add thread pin/unpin support

Add support to temporarily pin a thread to its current processor. This
may be used to access per-processor data structures in critical sections
with thread dispatching enabled, e.g. a pinned thread is allowed to
block.

Update #3508.

Files:
5 added
18 edited

Legend:

Unmodified
Added
Removed
  • cpukit/include/rtems/score/scheduler.h

    rd8bc0730 r7097962  
    135135    Scheduler_Node          *node,
    136136    Thread_Scheduler_state   next_state
     137  );
     138
     139  /**
     140   * @brief Pin thread operation.
     141   *
     142   * @param[in] scheduler The scheduler instance of the specified processor.
     143   * @param[in] the_thread The thread to pin.
     144   * @param[in] node The scheduler node of the thread.
     145   * @param[in] cpu The processor to pin the thread.
     146   */
     147  void ( *pin )(
     148    const Scheduler_Control *scheduler,
     149    Thread_Control          *the_thread,
     150    Scheduler_Node          *node,
     151    struct Per_CPU_Control  *cpu
     152  );
     153
     154  /**
     155   * @brief Unpin thread operation.
     156   *
     157   * @param[in] scheduler The scheduler instance of the specified processor.
     158   * @param[in] the_thread The thread to unpin.
     159   * @param[in] node The scheduler node of the thread.
     160   * @param[in] cpu The processor to unpin the thread.
     161   */
     162  void ( *unpin )(
     163    const Scheduler_Control *scheduler,
     164    Thread_Control          *the_thread,
     165    Scheduler_Node          *node,
     166    struct Per_CPU_Control  *cpu
    137167  );
    138168
     
    406436  );
    407437
     438  /**
     439   * @brief Does nothing in a single processor system, otherwise a fatal error
     440   * is issued.
     441   *
     442   * @param[in] scheduler Unused.
     443   * @param[in] the_thread Unused.
     444   * @param[in] node Unused.
     445   * @param[in] cpu Unused.
     446   */
     447  void _Scheduler_default_Pin_or_unpin(
     448    const Scheduler_Control *scheduler,
     449    Thread_Control          *the_thread,
     450    Scheduler_Node          *node,
     451    struct Per_CPU_Control  *cpu
     452  );
     453
    408454  #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
    409455    _Scheduler_default_Ask_for_help, \
    410456    _Scheduler_default_Reconsider_help_request, \
    411457    _Scheduler_default_Withdraw_node, \
     458    _Scheduler_default_Pin_or_unpin, \
     459    _Scheduler_default_Pin_or_unpin, \
    412460    NULL, \
    413461    NULL,
  • cpukit/include/rtems/score/scheduleredfsmp.h

    rd8bc0730 r7097962  
    88
    99/*
    10  * Copyright (c) 2017 embedded brains GmbH.
     10 * Copyright (c) 2017, 2018 embedded brains GmbH.
    1111 *
    1212 * The license and distribution terms for this file may be
     
    4444
    4545  /**
    46    * @brief The ready queue index depending on the processor affinity of the thread.
     46   * @brief The ready queue index depending on the processor affinity and
     47   * pinning of the thread.
    4748   *
    4849   * The ready queue index zero is used for threads with a one-to-all thread
     
    5051   * processor index plus one as the ready queue index.
    5152   */
    52   uint32_t ready_queue_index;
     53  uint8_t ready_queue_index;
     54
     55  /**
     56   * @brief Ready queue index according to thread affinity.
     57   */
     58  uint8_t affinity_ready_queue_index;
     59
     60  /**
     61   * @brief Ready queue index according to thread pinning.
     62   */
     63  uint8_t pinning_ready_queue_index;
    5364} Scheduler_EDF_SMP_Node;
    5465
     
    106117    _Scheduler_EDF_SMP_Reconsider_help_request, \
    107118    _Scheduler_EDF_SMP_Withdraw_node, \
     119    _Scheduler_EDF_SMP_Pin, \
     120    _Scheduler_EDF_SMP_Unpin, \
    108121    _Scheduler_EDF_SMP_Add_processor, \
    109122    _Scheduler_EDF_SMP_Remove_processor, \
     
    163176);
    164177
     178void _Scheduler_EDF_SMP_Pin(
     179  const Scheduler_Control *scheduler,
     180  Thread_Control          *the_thread,
     181  Scheduler_Node          *node,
     182  struct Per_CPU_Control  *cpu
     183);
     184
     185void _Scheduler_EDF_SMP_Unpin(
     186  const Scheduler_Control *scheduler,
     187  Thread_Control          *the_thread,
     188  Scheduler_Node          *node,
     189  struct Per_CPU_Control  *cpu
     190);
     191
    165192void _Scheduler_EDF_SMP_Add_processor(
    166193  const Scheduler_Control *scheduler,
  • cpukit/include/rtems/score/schedulerimpl.h

    rd8bc0730 r7097962  
    10961096  );
    10971097
    1098   if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
     1098  if (
     1099    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
     1100#if defined(RTEMS_SMP)
     1101      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
     1102      || the_thread->Scheduler.pin_level != 0
     1103#endif
     1104  ) {
    10991105    _Priority_Plain_insert(
    11001106      &old_scheduler_node->Wait.Priority,
     
    11061112
    11071113#if defined(RTEMS_SMP)
    1108   if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    1109     _Priority_Plain_insert(
    1110       &old_scheduler_node->Wait.Priority,
    1111       &the_thread->Real_priority,
    1112       the_thread->Real_priority.priority
    1113     );
    1114     return STATUS_RESOURCE_IN_USE;
    1115   }
    1116 
    11171114  old_scheduler = _Thread_Scheduler_get_home( the_thread );
    11181115  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
     
    11411138  }
    11421139
    1143   the_thread->Scheduler.home = new_scheduler;
     1140  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
     1141  the_thread->Scheduler.home_scheduler = new_scheduler;
    11441142
    11451143  _Scheduler_Release_critical( new_scheduler, &lock_context );
  • cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h

    rd8bc0730 r7097962  
    6262    _Scheduler_priority_affinity_SMP_Reconsider_help_request, \
    6363    _Scheduler_priority_affinity_SMP_Withdraw_node, \
     64    _Scheduler_default_Pin_or_unpin, \
     65    _Scheduler_default_Pin_or_unpin, \
    6466    _Scheduler_priority_affinity_SMP_Add_processor, \
    6567    _Scheduler_priority_affinity_SMP_Remove_processor, \
  • cpukit/include/rtems/score/schedulerprioritysmp.h

    rd8bc0730 r7097962  
    88
    99/*
    10  * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    9090    _Scheduler_priority_SMP_Reconsider_help_request, \
    9191    _Scheduler_priority_SMP_Withdraw_node, \
     92    _Scheduler_default_Pin_or_unpin, \
     93    _Scheduler_default_Pin_or_unpin, \
    9294    _Scheduler_priority_SMP_Add_processor, \
    9395    _Scheduler_priority_SMP_Remove_processor, \
  • cpukit/include/rtems/score/schedulersimplesmp.h

    rd8bc0730 r7097962  
    1010 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
    1111 *
    12  *  Copyright (c) 2013, 2016 embedded brains GmbH.
     12 *  Copyright (c) 2013, 2018 embedded brains GmbH.
    1313 *
    1414 *  The license and distribution terms for this file may be
     
    7373    _Scheduler_simple_SMP_Reconsider_help_request, \
    7474    _Scheduler_simple_SMP_Withdraw_node, \
     75    _Scheduler_default_Pin_or_unpin, \
     76    _Scheduler_default_Pin_or_unpin, \
    7577    _Scheduler_simple_SMP_Add_processor, \
    7678    _Scheduler_simple_SMP_Remove_processor, \
  • cpukit/include/rtems/score/schedulersmpimpl.h

    rd8bc0730 r7097962  
    11851185  ISR_lock_Context  lock_context;
    11861186  bool              success;
     1187
     1188  if ( thread->Scheduler.pinned_scheduler != NULL ) {
     1189    /*
     1190     * Pinned threads are not allowed to ask for help.  Return success to break
     1191     * the loop in _Thread_Ask_for_help() early.
     1192     */
     1193    return true;
     1194  }
    11871195
    11881196  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
     
    14751483    ( *enqueue )( context, node, insert_priority );
    14761484  } else {
     1485    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    14771486    ( *set_affinity )( context, node, arg );
    14781487  }
  • cpukit/include/rtems/score/schedulerstrongapa.h

    rd8bc0730 r7097962  
    88
    99/*
    10  * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    9090    _Scheduler_strong_APA_Reconsider_help_request, \
    9191    _Scheduler_strong_APA_Withdraw_node, \
     92    _Scheduler_default_Pin_or_unpin, \
     93    _Scheduler_default_Pin_or_unpin, \
    9294    _Scheduler_strong_APA_Add_processor, \
    9395    _Scheduler_strong_APA_Remove_processor, \
  • cpukit/include/rtems/score/smpimpl.h

    rd8bc0730 r7097962  
    8080  SMP_FATAL_SHUTDOWN,
    8181  SMP_FATAL_SHUTDOWN_RESPONSE,
    82   SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED
     82  SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED,
     83  SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED
    8384} SMP_Fatal_code;
    8485
  • cpukit/include/rtems/score/thread.h

    rd8bc0730 r7097962  
    260260
    261261  /**
    262    * @brief The home scheduler control of this thread.
    263    */
    264   const struct _Scheduler_Control *home;
     262   * @brief The home scheduler of this thread.
     263   */
     264  const struct _Scheduler_Control *home_scheduler;
     265
     266  /**
     267   * @brief The pinned scheduler of this thread.
     268   */
     269  const struct _Scheduler_Control *pinned_scheduler;
    265270
    266271  /**
     
    271276  /**
    272277   * @brief Scheduler nodes immediately available to the thread by its home
    273    * scheduler instance and due to thread queue ownerships.
     278   * scheduler and due to thread queue ownerships.
    274279   *
    275280   * This chain is protected by the thread wait lock.
    276281   *
    277282   * This chain is never empty.  The first scheduler node on the chain is the
    278    * scheduler node of the home scheduler instance.
     283   * scheduler node of the home scheduler.
    279284   */
    280285  Chain_Control Wait_nodes;
     
    286291   * This chain is protected by the thread state lock.
    287292   *
    288    * This chain is never empty.  The first scheduler node on the chain is the
    289    * scheduler node of the home scheduler instance.
     293   * This chain is never empty for normal threads (the only exception are idle
     294   * threads associated with an online processor which is not used by a
     295   * scheduler).  In case a pinned scheduler is set for this thread, then the
     296   * first scheduler node of this chain belongs to the pinned scheduler,
     297   * otherwise the first scheduler node of this chain belongs to the home
     298   * scheduler.
    290299   */
    291300  Chain_Control Scheduler_nodes;
     
    312321   */
    313322  Scheduler_Node *requests;
     323
     324  /**
     325   * @brief The thread pinning to current processor level.
     326   *
     327   * Must be touched only by the executing thread with thread dispatching
     328   * disabled.  If non-zero, then the thread is pinned to its current
     329   * processor.  The pin level is incremented and decremented by two.  The
     330   * least-significant bit indicates that the thread was pre-empted and must
     331   * undo the pinning with respect to the scheduler once the level changes from
     332   * three to one.
     333   *
     334   * The thread pinning may be used to access per-processor data structures in
     335   * critical sections with enabled thread dispatching, e.g. a pinned thread is
     336   * allowed to block.
     337   *
     338   * Thread pinning should be used only for short critical sections and not all
     339   * the time.  Thread pinning is a very low overhead operation in case the
     340   * thread is not preempted during the pinning.
     341   *
     342   * @see _Thread_Pin() and _Thread_Unpin().
     343   */
     344  int pin_level;
    314345
    315346  /**
  • cpukit/include/rtems/score/threadimpl.h

    rd8bc0730 r7097962  
    10261026{
    10271027#if defined(RTEMS_SMP)
    1028   return the_thread->Scheduler.home;
     1028  return the_thread->Scheduler.home_scheduler;
    10291029#else
    10301030  (void) the_thread;
     
    19541954);
    19551955
     1956#if defined(RTEMS_SMP)
     1957#define THREAD_PIN_STEP 2
     1958
     1959#define THREAD_PIN_PREEMPTION 1
     1960
     1961void _Thread_Do_unpin(
     1962  Thread_Control  *executing,
     1963  Per_CPU_Control *cpu_self
     1964);
     1965#endif
     1966
     1967RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
     1968{
     1969#if defined(RTEMS_SMP)
     1970  _Assert( executing == _Thread_Executing );
     1971
     1972  executing->Scheduler.pin_level += THREAD_PIN_STEP;
     1973#else
     1974  (void) executing;
     1975#endif
     1976}
     1977
     1978RTEMS_INLINE_ROUTINE void _Thread_Unpin(
     1979  Thread_Control  *executing,
     1980  Per_CPU_Control *cpu_self
     1981)
     1982{
     1983#if defined(RTEMS_SMP)
     1984  unsigned int pin_level;
     1985
     1986  _Assert( executing == _Thread_Executing );
     1987
     1988  pin_level = executing->Scheduler.pin_level;
     1989  _Assert( pin_level > 0 );
     1990
     1991  if (
     1992    RTEMS_PREDICT_TRUE(
     1993      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
     1994    )
     1995  ) {
     1996    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
     1997  } else {
     1998    _Thread_Do_unpin( executing, cpu_self );
     1999  }
     2000#else
     2001  (void) executing;
     2002  (void) cpu_self;
     2003#endif
     2004}
     2005
    19562006/** @}*/
    19572007
  • cpukit/rtems/src/scheduleraddprocessor.c

    rd8bc0730 r7097962  
    7474    cpu->Scheduler.idle_if_online_and_unused = NULL;
    7575
    76     idle->Scheduler.home = scheduler;
     76    idle->Scheduler.home_scheduler = scheduler;
    7777    idle->Start.initial_priority = idle_priority;
    7878    scheduler_node =
  • cpukit/score/Makefile.am

    rd8bc0730 r7097962  
    2727libscore_a_SOURCES += src/percpustatewait.c
    2828libscore_a_SOURCES += src/profilingsmplock.c
     29libscore_a_SOURCES += src/schedulerdefaultpinunpin.c
    2930libscore_a_SOURCES += src/scheduleredfsmp.c
    3031libscore_a_SOURCES += src/schedulerpriorityaffinitysmp.c
     
    3940libscore_a_SOURCES += src/schedulersmp.c
    4041libscore_a_SOURCES += src/schedulersmpstartidle.c
     42libscore_a_SOURCES += src/threadunpin.c
    4143endif
    4244
  • cpukit/score/src/scheduleredfsmp.c

    rd8bc0730 r7097962  
    144144  Scheduler_EDF_SMP_Node    *highest_ready;
    145145  Scheduler_EDF_SMP_Node    *node;
    146   uint32_t                   rqi;
     146  uint8_t                    rqi;
    147147  const Chain_Node          *tail;
    148148  Chain_Node                *next;
     
    200200static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
    201201  const Scheduler_EDF_SMP_Context *self,
    202   uint32_t                         rqi
     202  uint8_t                          rqi
    203203)
    204204{
     
    212212{
    213213  Scheduler_EDF_SMP_Node *filter;
    214   uint32_t                rqi;
     214  uint8_t                 rqi;
    215215
    216216  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
     
    241241  Scheduler_EDF_SMP_Context     *self;
    242242  Scheduler_EDF_SMP_Node        *node;
    243   uint32_t                       rqi;
     243  uint8_t                        rqi;
    244244  Scheduler_EDF_SMP_Ready_queue *ready_queue;
    245245  int                            generation_index;
     
    307307  Scheduler_EDF_SMP_Context     *self;
    308308  Scheduler_EDF_SMP_Node        *node;
    309   uint32_t                       rqi;
     309  uint8_t                        rqi;
    310310  Scheduler_EDF_SMP_Ready_queue *ready_queue;
    311311
     
    370370  Scheduler_EDF_SMP_Context     *self;
    371371  Scheduler_EDF_SMP_Node        *scheduled;
    372   uint32_t                       rqi;
     372  uint8_t                        rqi;
    373373
    374374  (void) victim_base;
     
    648648{
    649649  Scheduler_EDF_SMP_Node *node;
    650   const uint32_t         *rqi;
     650  const uint8_t          *rqi;
    651651
    652652  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     
    673673}
    674674
     675void _Scheduler_EDF_SMP_Pin(
     676  const Scheduler_Control *scheduler,
     677  Thread_Control          *thread,
     678  Scheduler_Node          *node_base,
     679  struct Per_CPU_Control  *cpu
     680)
     681{
     682  Scheduler_EDF_SMP_Node *node;
     683  uint8_t                 rqi;
     684
     685  (void) scheduler;
     686  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     687  rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
     688
     689  _Assert(
     690    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
     691  );
     692
     693  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     694  node->ready_queue_index = rqi;
     695  node->pinning_ready_queue_index = rqi;
     696}
     697
     698void _Scheduler_EDF_SMP_Unpin(
     699  const Scheduler_Control *scheduler,
     700  Thread_Control          *thread,
     701  Scheduler_Node          *node_base,
     702  struct Per_CPU_Control  *cpu
     703)
     704{
     705  Scheduler_EDF_SMP_Node *node;
     706
     707  (void) scheduler;
     708  (void) cpu;
     709  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     710
     711  _Assert(
     712    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
     713  );
     714
     715  node->ready_queue_index = node->affinity_ready_queue_index;
     716  node->pinning_ready_queue_index = 0;
     717}
     718
    675719bool _Scheduler_EDF_SMP_Set_affinity(
    676720  const Scheduler_Control *scheduler,
    677721  Thread_Control          *thread,
    678   Scheduler_Node          *node,
     722  Scheduler_Node          *node_base,
    679723  const Processor_mask    *affinity
    680724)
    681725{
    682   Scheduler_Context *context;
    683   Processor_mask     local_affinity;
    684   uint32_t           rqi;
     726  Scheduler_Context      *context;
     727  Scheduler_EDF_SMP_Node *node;
     728  Processor_mask          local_affinity;
     729  uint8_t                 rqi;
    685730
    686731  context = _Scheduler_Get_context( scheduler );
     
    697742  }
    698743
    699   _Scheduler_SMP_Set_affinity(
    700     context,
    701     thread,
    702     node,
    703     &rqi,
    704     _Scheduler_EDF_SMP_Do_set_affinity,
    705     _Scheduler_EDF_SMP_Extract_from_ready,
    706     _Scheduler_EDF_SMP_Get_highest_ready,
    707     _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    708     _Scheduler_EDF_SMP_Enqueue,
    709     _Scheduler_EDF_SMP_Allocate_processor
    710   );
     744  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     745  node->affinity_ready_queue_index = rqi;
     746
     747  if ( node->pinning_ready_queue_index == 0 ) {
     748    _Scheduler_SMP_Set_affinity(
     749      context,
     750      thread,
     751      node_base,
     752      &rqi,
     753      _Scheduler_EDF_SMP_Do_set_affinity,
     754      _Scheduler_EDF_SMP_Extract_from_ready,
     755      _Scheduler_EDF_SMP_Get_highest_ready,
     756      _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
     757      _Scheduler_EDF_SMP_Enqueue,
     758      _Scheduler_EDF_SMP_Allocate_processor
     759    );
     760  }
    711761
    712762  return true;
  • cpukit/score/src/threaddispatch.c

    rd8bc0730 r7097962  
    1010 *  On-Line Applications Research Corporation (OAR).
    1111 *
    12  *  Copyright (c) 2014, 2016 embedded brains GmbH.
     12 *  Copyright (c) 2014, 2018 embedded brains GmbH.
    1313 *
    1414 *  The license and distribution terms for this file may be
     
    3838
    3939#if defined(RTEMS_SMP)
     40static ISR_Level _Thread_Check_pinning(
     41  Thread_Control  *executing,
     42  Per_CPU_Control *cpu_self,
     43  ISR_Level        level
     44)
     45{
     46  unsigned int pin_level;
     47
     48  pin_level = executing->Scheduler.pin_level;
     49
     50  if (
     51    RTEMS_PREDICT_FALSE( pin_level != 0 )
     52      && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
     53  ) {
     54    ISR_lock_Context         state_lock_context;
     55    ISR_lock_Context         scheduler_lock_context;
     56    const Scheduler_Control *pinned_scheduler;
     57    Scheduler_Node          *pinned_node;
     58    const Scheduler_Control *home_scheduler;
     59
     60    _ISR_Local_enable( level );
     61
     62    executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;
     63
     64    _Thread_State_acquire( executing, &state_lock_context );
     65
     66    pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
     67    pinned_node = _Thread_Scheduler_get_node_by_index(
     68      executing,
     69      _Scheduler_Get_index( pinned_scheduler )
     70    );
     71
     72    if ( _Thread_Is_ready( executing ) ) {
     73      _Scheduler_Block( executing);
     74    }
     75
     76    home_scheduler = _Thread_Scheduler_get_home( executing );
     77    executing->Scheduler.pinned_scheduler = pinned_scheduler;
     78
     79    if ( home_scheduler != pinned_scheduler ) {
     80      _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
     81      _Chain_Prepend_unprotected(
     82        &executing->Scheduler.Scheduler_nodes,
     83        &pinned_node->Thread.Scheduler_node.Chain
     84      );
     85    }
     86
     87    _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );
     88
     89    ( *pinned_scheduler->Operations.pin )(
     90      pinned_scheduler,
     91      executing,
     92      pinned_node,
     93      cpu_self
     94    );
     95
     96    if ( _Thread_Is_ready( executing ) ) {
     97      ( *pinned_scheduler->Operations.unblock )(
     98        pinned_scheduler,
     99        executing,
     100        pinned_node
     101      );
     102    }
     103
     104    _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );
     105
     106    _Thread_State_release( executing, &state_lock_context );
     107
     108    _ISR_Local_disable( level );
     109  }
     110
     111  return level;
     112}
     113
    40114static void _Thread_Ask_for_help( Thread_Control *the_thread )
    41115{
     
    78152#endif
    79153
    80 static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
     154static ISR_Level _Thread_Preemption_intervention(
     155  Thread_Control  *executing,
     156  Per_CPU_Control *cpu_self,
     157  ISR_Level        level
     158)
    81159{
    82160#if defined(RTEMS_SMP)
     161  level = _Thread_Check_pinning( executing, cpu_self, level );
     162
    83163  _Per_CPU_Acquire( cpu_self );
    84164
     
    103183  (void) cpu_self;
    104184#endif
     185
     186  return level;
    105187}
    106188
     
    193275    Thread_Control *heir;
    194276
    195     _Thread_Preemption_intervention( cpu_self );
     277    level = _Thread_Preemption_intervention( executing, cpu_self, level );
    196278    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
    197279
  • cpukit/score/src/threadinitialize.c

    rd8bc0730 r7097962  
    246246#if defined(RTEMS_SMP)
    247247  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
    248   the_thread->Scheduler.home = scheduler;
     248  the_thread->Scheduler.home_scheduler = scheduler;
    249249  _ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
    250250  _Processor_mask_Assign(
  • testsuites/smptests/Makefile.am

    rd8bc0730 r7097962  
    623623
    624624if HAS_SMP
     625if TEST_smpthreadpin01
     626smp_tests += smpthreadpin01
     627smp_screens += smpthreadpin01/smpthreadpin01.scn
     628smp_docs += smpthreadpin01/smpthreadpin01.doc
     629smpthreadpin01_SOURCES = smpthreadpin01/init.c
     630smpthreadpin01_CPPFLAGS = $(AM_CPPFLAGS) \
     631        $(TEST_FLAGS_smpthreadpin01) $(support_includes)
     632endif
     633endif
     634
     635if HAS_SMP
    625636if TEST_smpunsupported01
    626637smp_tests += smpunsupported01
  • testsuites/smptests/configure.ac

    rd8bc0730 r7097962  
    8888RTEMS_TEST_CHECK([smpswitchextension01])
    8989RTEMS_TEST_CHECK([smpthreadlife01])
     90RTEMS_TEST_CHECK([smpthreadpin01])
    9091RTEMS_TEST_CHECK([smpunsupported01])
    9192RTEMS_TEST_CHECK([smpwakeafter01])
Note: See TracChangeset for help on using the changeset viewer.