source: rtems/cpukit/score/src/threaddispatch.c @ 2548d14

5
Last change on this file since 2548d14 was 7097962, checked in by Sebastian Huber <sebastian.huber@…>, on 08/29/18 at 07:43:44

score: Add thread pin/unpin support

Add support to temporarily pin a thread to its current processor. This
may be used to access per-processor data structures in critical sections
with enabled thread dispatching, e.g. a pinned thread is allowed to
block.

Update #3508.

  • Property mode set to 100644
File size: 9.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief Dispatch Thread
5 * @ingroup ScoreThread
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2009.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2014, 2018 embedded brains GmbH.
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#if HAVE_CONFIG_H
20#include "config.h"
21#endif
22
23#include <rtems/score/threaddispatch.h>
24#include <rtems/score/assert.h>
25#include <rtems/score/isr.h>
26#include <rtems/score/schedulerimpl.h>
27#include <rtems/score/threadimpl.h>
28#include <rtems/score/todimpl.h>
29#include <rtems/score/userextimpl.h>
30#include <rtems/score/wkspace.h>
31#include <rtems/config.h>
32
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
/*
 * Owner of the floating point context, used by ports with hardware or
 * software floating point support.  NOTE(review): presumably tracks the
 * thread whose FP context is currently loaded for deferred FP context
 * switching -- confirm against _Thread_Save_fp()/_Thread_Restore_fp().
 */
Thread_Control *_Thread_Allocated_fp;
#endif

/* Chain of user extension thread switch handlers, initially empty. */
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
38
39#if defined(RTEMS_SMP)
/*
 * Completes a pending processor pinning of the executing thread.
 *
 * If the thread was pinned (pin_level != 0) but the pinning was not yet
 * carried out (THREAD_PIN_PREEMPTION not set), then block the thread in its
 * current scheduler context and hand it over to the scheduler instance of
 * the current processor via the scheduler pin operation.
 *
 * Called with interrupts disabled; interrupts are temporarily enabled while
 * the thread state and scheduler locks are acquired.  Returns the ISR level
 * to be used by the caller (interrupts are disabled again on return).
 */
static ISR_Level _Thread_Check_pinning(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
  unsigned int pin_level;

  pin_level = executing->Scheduler.pin_level;

  if (
    RTEMS_PREDICT_FALSE( pin_level != 0 )
      && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
  ) {
    ISR_lock_Context         state_lock_context;
    ISR_lock_Context         scheduler_lock_context;
    const Scheduler_Control *pinned_scheduler;
    Scheduler_Node          *pinned_node;
    const Scheduler_Control *home_scheduler;

    /* The locks below must not be acquired with interrupts disabled here. */
    _ISR_Local_enable( level );

    /* Mark the pinning as carried out before performing it. */
    executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;

    _Thread_State_acquire( executing, &state_lock_context );

    pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
    pinned_node = _Thread_Scheduler_get_node_by_index(
      executing,
      _Scheduler_Get_index( pinned_scheduler )
    );

    /* Block first so the thread can be re-unblocked on the pinned node. */
    if ( _Thread_Is_ready( executing ) ) {
      _Scheduler_Block( executing);
    }

    home_scheduler = _Thread_Scheduler_get_home( executing );
    executing->Scheduler.pinned_scheduler = pinned_scheduler;

    /*
     * If the thread is pinned to a foreign scheduler, move the pinned node
     * to the front of the thread's scheduler node chain so it takes
     * precedence.
     */
    if ( home_scheduler != pinned_scheduler ) {
      _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
      _Chain_Prepend_unprotected(
        &executing->Scheduler.Scheduler_nodes,
        &pinned_node->Thread.Scheduler_node.Chain
      );
    }

    _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );

    ( *pinned_scheduler->Operations.pin )(
      pinned_scheduler,
      executing,
      pinned_node,
      cpu_self
    );

    /* Re-unblock the thread on the scheduler instance it is now pinned to. */
    if ( _Thread_Is_ready( executing ) ) {
      ( *pinned_scheduler->Operations.unblock )(
        pinned_scheduler,
        executing,
        pinned_node
      );
    }

    _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );

    _Thread_State_release( executing, &state_lock_context );

    /* Restore the interrupt disable state expected by the caller. */
    _ISR_Local_disable( level );
  }

  return level;
}
113
/*
 * Asks the scheduler instances of the thread's scheduler nodes, in chain
 * order, to help this thread obtain a processor.  The iteration stops at
 * the first scheduler instance whose ask_for_help operation succeeds.
 *
 * The caller must hold the thread state lock (see
 * _Thread_Preemption_intervention()).
 */
static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *node;
  const Chain_Node *tail;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    bool                     success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    /* Each ask_for_help operation runs under its own scheduler lock. */
    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  } while ( node != tail );
}
146
147static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
148{
149  return executing->Scheduler.helping_nodes > 0
150    && _Thread_Is_ready( executing );
151}
152#endif
153
/*
 * Performs interventions required before the heir thread is selected.
 *
 * On SMP configurations this completes a pending processor pinning of the
 * executing thread and processes all threads registered on this processor
 * as being in need of help.  On uniprocessor configurations this is a
 * no-op.
 *
 * Called with interrupts disabled; returns the (possibly refreshed) ISR
 * level to be used by the caller.
 */
static ISR_Level _Thread_Preemption_intervention(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
#if defined(RTEMS_SMP)
  level = _Thread_Check_pinning( executing, cpu_self, level );

  _Per_CPU_Acquire( cpu_self );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node       *node;
    Thread_Control   *the_thread;
    ISR_lock_Context  lock_context;

    /* Detach the node so the thread can re-register itself later. */
    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    _Chain_Set_off_chain( node );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );

    /*
     * Drop the per-CPU lock before acquiring the thread state lock to
     * observe the lock order; reacquire it before testing the chain again.
     */
    _Per_CPU_Release( cpu_self );
    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    _Per_CPU_Acquire( cpu_self );
  }

  _Per_CPU_Release( cpu_self );
#else
  (void) cpu_self;
#endif

  return level;
}
188
/*
 * Gives every scheduler instance which owns one of the thread's scheduler
 * nodes the opportunity to withdraw a no longer needed help request after a
 * context switch.  Does nothing if the thread has no helping nodes or is
 * not ready.  On uniprocessor configurations this is a no-op.
 *
 * The caller must hold the thread state lock (see
 * _Thread_Run_post_switch_actions()).
 */
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  if ( !_Thread_Can_ask_for_help( executing ) ) {
    return;
  }

  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    /* Each reconsider_help_request runs under its own scheduler lock. */
    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.reconsider_help_request )(
      scheduler,
      executing,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  (void) executing;
#endif
}
224
225static Thread_Action *_Thread_Get_post_switch_action(
226  Thread_Control *executing
227)
228{
229  Chain_Control *chain = &executing->Post_switch_actions.Chain;
230
231  return (Thread_Action *) _Chain_Get_unprotected( chain );
232}
233
/*
 * Runs all pending post-switch actions of the executing thread, after
 * first reconsidering stale help requests.
 *
 * The action handler is invoked with the thread state lock held and is
 * handed the lock context; the reacquire after each call shows the handler
 * is responsible for releasing the lock (NOTE(review): presumed contract of
 * Thread_Action handlers -- confirm against their implementations).
 */
static void _Thread_Run_post_switch_actions( Thread_Control *executing )
{
  ISR_lock_Context  lock_context;
  Thread_Action    *action;

  _Thread_State_acquire( executing, &lock_context );
  _Thread_Post_switch_cleanup( executing );
  action = _Thread_Get_post_switch_action( executing );

  while ( action != NULL ) {
    /* Mark the action as no longer enqueued before running it. */
    _Chain_Set_off_chain( &action->Node );

    ( *action->handler )( executing, action, &lock_context );

    /* The handler released the lock; reacquire to fetch the next action. */
    _Thread_State_acquire( executing, &lock_context );
    action = _Thread_Get_post_switch_action( executing );
  }

  _Thread_State_release( executing, &lock_context );
}
254
/*
 * Performs the thread dispatch.
 *
 * On entry interrupts must be disabled with the given ISR level and the
 * thread dispatch disable level of this processor must be exactly one.
 * Loops performing context switches until no further dispatch is necessary
 * on the (possibly changed) current processor, then re-enables thread
 * dispatching and interrupts and runs the post-switch actions of the thread
 * that entered this function.
 */
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
  /*
   * With robust thread dispatch, entering here with interrupts disabled is
   * a fatal usage error (on SMP only when actually running with more than
   * one processor configured).
   */
  if (
    !_ISR_Is_enabled( level )
#if defined(RTEMS_SMP)
      && rtems_configuration_is_smp_enabled()
#endif
  ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
  }
#endif

  executing = cpu_self->executing;

  do {
    Thread_Control *heir;

    level = _Thread_Preemption_intervention( executing, cpu_self, level );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /* The context switch itself runs with interrupts enabled. */
    _ISR_Local_enable( level );

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _ISR_Local_disable( level );
  } while ( cpu_self->dispatch_necessary );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  /* Runs with thread dispatching enabled again. */
  _Thread_Run_post_switch_actions( executing );
}
321
322void _Thread_Dispatch( void )
323{
324  ISR_Level        level;
325  Per_CPU_Control *cpu_self;
326
327  _ISR_Local_disable( level );
328
329  cpu_self = _Per_CPU_Get();
330
331  if ( cpu_self->dispatch_necessary ) {
332    _Profiling_Thread_dispatch_disable( cpu_self, 0 );
333    _Assert( cpu_self->thread_dispatch_disable_level == 0 );
334    cpu_self->thread_dispatch_disable_level = 1;
335    _Thread_Do_dispatch( cpu_self, level );
336  } else {
337    _ISR_Local_enable( level );
338  }
339}
340
/*
 * Performs a thread dispatch unconditionally on behalf of the caller, who
 * must have set the thread dispatch disable level to exactly one; any
 * other level is a fatal error.
 */
void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self )
{
  ISR_Level level;

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
  }

  _ISR_Local_disable( level );
  _Thread_Do_dispatch( cpu_self, level );
}
352
/*
 * Decrements the thread dispatch disable level of the processor.  When the
 * level drops from one to zero and a dispatch is necessary (or, with robust
 * thread dispatch, interrupts are unexpectedly disabled), the dispatch is
 * carried out via _Thread_Do_dispatch().
 */
void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
{
  uint32_t disable_level = cpu_self->thread_dispatch_disable_level;

  if ( disable_level == 1 ) {
    ISR_Level level;

    _ISR_Local_disable( level );

    if (
      cpu_self->dispatch_necessary
#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
        || !_ISR_Is_enabled( level )
#endif
    ) {
      /*
       * The disable level stays at one here; _Thread_Do_dispatch() resets
       * it to zero itself (see its post_switch path).
       */
      _Thread_Do_dispatch( cpu_self, level );
    } else {
      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );
      _ISR_Local_enable( level );
    }
  } else {
    _Assert( disable_level > 0 );
    cpu_self->thread_dispatch_disable_level = disable_level - 1;
  }
}
Note: See TracBrowser for help on using the repository browser.