source: rtems/cpukit/score/src/threaddispatch.c

Last change on this file was bcef89f2, checked in by Sebastian Huber <sebastian.huber@…>, on 05/19/23 at 06:18:25

Update company name

The embedded brains GmbH & Co. KG is the legal successor of embedded
brains GmbH.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreThread
 *
 * @brief This source file contains the definition of ::_Thread_Allocated_fp
 *   and ::_User_extensions_Switches_list and the implementation of
 *   _Thread_Dispatch_direct(), _Thread_Dispatch_enable(),
 *   and _Thread_Do_dispatch().
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/config.h>

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
Thread_Control *_Thread_Allocated_fp;
#endif

CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );

#if defined(RTEMS_SMP)
static ISR_Level _Thread_Check_pinning(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
  unsigned int pin_level;

  pin_level = executing->Scheduler.pin_level;

  if (
    RTEMS_PREDICT_FALSE( pin_level != 0 )
      && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
  ) {
    ISR_lock_Context         state_lock_context;
    ISR_lock_Context         scheduler_lock_context;
    const Scheduler_Control *pinned_scheduler;
    Scheduler_Node          *pinned_node;
    const Scheduler_Control *home_scheduler;

    _ISR_Local_enable( level );

    executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;

    _Thread_State_acquire( executing, &state_lock_context );

    pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
    pinned_node = _Thread_Scheduler_get_node_by_index(
      executing,
      _Scheduler_Get_index( pinned_scheduler )
    );

    if ( _Thread_Is_ready( executing ) ) {
      _Scheduler_Block( executing );
    }

    home_scheduler = _Thread_Scheduler_get_home( executing );
    executing->Scheduler.pinned_scheduler = pinned_scheduler;

    if ( home_scheduler != pinned_scheduler ) {
      _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
      _Chain_Prepend_unprotected(
        &executing->Scheduler.Scheduler_nodes,
        &pinned_node->Thread.Scheduler_node.Chain
      );
    }

    _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );

    ( *pinned_scheduler->Operations.pin )(
      pinned_scheduler,
      executing,
      pinned_node,
      cpu_self
    );

    if ( _Thread_Is_ready( executing ) ) {
      ( *pinned_scheduler->Operations.unblock )(
        pinned_scheduler,
        executing,
        pinned_node
      );
    }

    _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );

    _Thread_State_release( executing, &state_lock_context );

    _ISR_Local_disable( level );
  }

  return level;
}
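
/*
 * Illustrative sketch of a hypothetical caller; it assumes the
 * _Thread_Pin()/_Thread_Unpin() helpers declared in
 * <rtems/score/threadimpl.h>, which require thread dispatching to be
 * disabled.  _Thread_Check_pinning() above performs the deferred
 * scheduler-side work of such a pin request at the next dispatch:
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   cpu_self = _Thread_Dispatch_disable();
 *   _Thread_Pin( _Thread_Get_executing() );
 *   _Thread_Dispatch_enable( cpu_self );
 *
 *   ...access processor-local data without fear of migration...
 *
 *   cpu_self = _Thread_Dispatch_disable();
 *   _Thread_Unpin( _Thread_Get_executing(), cpu_self );
 *   _Thread_Dispatch_enable( cpu_self );
 */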

static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *node;
  const Chain_Node *tail;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    bool                     success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  } while ( node != tail );
}

static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
{
  return executing->Scheduler.helping_nodes > 0
    && _Thread_Is_ready( executing );
}
#endif
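
/*
 * Scheduler instances indicate that a thread needs help by placing it on
 * the Threads_in_need_for_help chain of a processor.
 * _Thread_Preemption_intervention() below drains this chain during thread
 * dispatch and retries the ask-for-help operation for each registered
 * thread via _Thread_Ask_for_help() above.
 */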

static ISR_Level _Thread_Preemption_intervention(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  ISR_Level        level
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  level = _Thread_Check_pinning( executing, cpu_self, level );

  _Per_CPU_Acquire( cpu_self, &lock_context );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node     *node;
    Thread_Control *the_thread;

    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
    the_thread->Scheduler.ask_for_help_cpu = NULL;

    _Per_CPU_Release( cpu_self, &lock_context );

    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );

    _Per_CPU_Acquire( cpu_self, &lock_context );
  }

  _Per_CPU_Release( cpu_self, &lock_context );
#else
  (void) cpu_self;
#endif

  return level;
}

static void _Thread_Post_switch_cleanup( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  if ( !_Thread_Can_ask_for_help( executing ) ) {
    return;
  }

  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.reconsider_help_request )(
      scheduler,
      executing,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  (void) executing;
#endif
}

static Thread_Action *_Thread_Get_post_switch_action(
  Thread_Control *executing
)
{
  Chain_Control *chain = &executing->Post_switch_actions.Chain;

  return (Thread_Action *) _Chain_Get_unprotected( chain );
}

static void _Thread_Run_post_switch_actions( Thread_Control *executing )
{
  ISR_lock_Context  lock_context;
  Thread_Action    *action;

  _Thread_State_acquire( executing, &lock_context );
  _Thread_Post_switch_cleanup( executing );
  action = _Thread_Get_post_switch_action( executing );

  while ( action != NULL ) {
    _Chain_Set_off_chain( &action->Node );
    ( *action->handler )( executing, action, &lock_context );
    action = _Thread_Get_post_switch_action( executing );
  }

  _Thread_State_release( executing, &lock_context );
}
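
/*
 * Illustrative sketch with hypothetical names (_My_action_handler, action);
 * it assumes the _Thread_Add_post_switch_action() helper declared in
 * <rtems/score/threadimpl.h>.  Handlers registered this way run in
 * _Thread_Run_post_switch_actions() above with the thread state lock held
 * and must return with it re-acquired:
 *
 *   static void _My_action_handler(
 *     Thread_Control   *executing,
 *     Thread_Action    *action,
 *     ISR_lock_Context *lock_context
 *   )
 *   {
 *     _Thread_State_release( executing, lock_context );
 *     ...carry out the deferred work...
 *     _Thread_State_acquire( executing, lock_context );
 *   }
 *
 *   ...with the thread state lock of the_thread held...
 *   _Thread_Add_post_switch_action( the_thread, &action, _My_action_handler );
 */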

void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
  if (
    !_ISR_Is_enabled( level )
#if defined(RTEMS_SMP) && CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
  }
#endif

  executing = cpu_self->executing;

  do {
    Thread_Control                     *heir;
    const Thread_CPU_budget_operations *cpu_budget_operations;

    level = _Thread_Preemption_intervention( executing, cpu_self, level );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     * If the heir and executing are the same, then there is no need to do a
     * context switch.  Proceed to run the post switch actions.  This is
     * normally done to dispatch signals.
     */
    if ( heir == executing ) {
      break;
    }

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */

    cpu_budget_operations = heir->CPU_budget.operations;

    if ( cpu_budget_operations != NULL ) {
      ( *cpu_budget_operations->at_context_switch )( heir );
    }

    _ISR_Local_enable( level );

#if !defined(RTEMS_SMP)
    _User_extensions_Thread_switch( executing, heir );
#endif
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );
#if defined(RTEMS_SMP)
    _User_extensions_Thread_switch( NULL, executing );
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _ISR_Local_disable( level );
  } while ( cpu_self->dispatch_necessary );

  /*
   * We are done with context switching.  Proceed to run the post switch
   * actions.
   */

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  _Thread_Run_post_switch_actions( executing );
}

void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self )
{
  ISR_Level level;

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
  }

  _ISR_Local_disable( level );
  _Thread_Do_dispatch( cpu_self, level );
}
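
/*
 * Illustrative sketch of a hypothetical caller: _Thread_Dispatch_direct()
 * requires a thread dispatch disable level of exactly one, which holds for
 * example right after a matching _Thread_Dispatch_disable():
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   cpu_self = _Thread_Dispatch_disable();
 *   ...block the executing thread or make another thread the heir...
 *   _Thread_Dispatch_direct( cpu_self );
 */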

RTEMS_ALIAS( _Thread_Dispatch_direct ) void
_Thread_Dispatch_direct_no_return( Per_CPU_Control * );

void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
{
  uint32_t disable_level = cpu_self->thread_dispatch_disable_level;

  if ( disable_level == 1 ) {
    ISR_Level level;

    _ISR_Local_disable( level );

    if (
      cpu_self->dispatch_necessary
#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
        || !_ISR_Is_enabled( level )
#endif
    ) {
      _Thread_Do_dispatch( cpu_self, level );
    } else {
      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );
      _ISR_Local_enable( level );
    }
  } else {
    _Assert( disable_level > 0 );
    cpu_self->thread_dispatch_disable_level = disable_level - 1;
  }
}
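
/*
 * Illustrative sketch of the common pairing: _Thread_Dispatch_disable()
 * defers dispatching, and _Thread_Dispatch_enable() above carries out any
 * dispatch that became necessary while dispatching was disabled:
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   cpu_self = _Thread_Dispatch_disable();
 *   ...operate on scheduler or thread state...
 *   _Thread_Dispatch_enable( cpu_self );
 */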