source: rtems/cpukit/score/src/threaddispatch.c @ 351c14d

Last change on this file since 351c14d was 351c14d, checked in by Sebastian Huber <sebastian.huber@…>, on 09/27/16 at 09:33:36

score: Add new SMP scheduler helping protocol

Update #2556.

/**
 * @file
 *
 * @brief Dispatch Thread
 * @ingroup ScoreThread
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/config.h>

#if __RTEMS_ADA__
void *rtems_ada_self;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
Thread_Control *_Thread_Allocated_fp;
#endif

CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );

#if defined(RTEMS_SMP)
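/*
 * The executing thread can take part in the helping protocol only if it
 * uses helping scheduler nodes and is still ready.
 */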
static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
{
  return executing->Scheduler.helping_nodes > 0
    && _Thread_Is_ready( executing );
}
#endif

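/*
 * Drains the per-CPU chain of threads in need of help and asks the
 * scheduler for help on behalf of each of them.  The per-CPU lock is
 * released while the thread state lock is held, so new threads may be
 * added to the chain concurrently; the loop runs until the chain is
 * observed to be empty.
 */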
static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
{
#if defined(RTEMS_SMP)
  _Per_CPU_Acquire( cpu_self );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node       *node;
    Thread_Control   *the_thread;
    ISR_lock_Context  lock_context;

    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    _Chain_Set_off_chain( node );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );

    _Per_CPU_Release( cpu_self );
    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Scheduler_ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    _Per_CPU_Acquire( cpu_self );
  }

  _Per_CPU_Release( cpu_self );
#else
  (void) cpu_self;
#endif
}

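/*
 * If the executing thread can still ask for help, let each of its
 * schedulers reconsider the help request for the corresponding scheduler
 * node.
 */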
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  if ( !_Thread_Can_ask_for_help( executing ) ) {
    return;
  }

  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.reconsider_help_request )(
      scheduler,
      executing,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  (void) executing;
#endif
}

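/*
 * Obtains the next pending post-switch action of the thread, or NULL if
 * there is none.
 */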
static Thread_Action *_Thread_Get_post_switch_action(
  Thread_Control *executing
)
{
  Chain_Control *chain = &executing->Post_switch_actions.Chain;

  return (Thread_Action *) _Chain_Get_unprotected( chain );
}

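/*
 * Runs the post-switch cleanup and all pending post-switch actions.  Each
 * action handler is called with the thread state lock held and releases
 * it, which is why the lock is acquired again before the next action is
 * fetched.
 */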
static void _Thread_Run_post_switch_actions( Thread_Control *executing )
{
  ISR_lock_Context  lock_context;
  Thread_Action    *action;

  _Thread_State_acquire( executing, &lock_context );
  _Thread_Post_switch_cleanup( executing );
  action = _Thread_Get_post_switch_action( executing );

  while ( action != NULL ) {
    _Chain_Set_off_chain( &action->Node );

    ( *action->handler )( executing, action, &lock_context );

    _Thread_State_acquire( executing, &lock_context );
    action = _Thread_Get_post_switch_action( executing );
  }

  _Thread_State_release( executing, &lock_context );
}

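/*
 * Performs the actual dispatch.  The caller must have disabled interrupts
 * via the provided ISR level and set the thread dispatch disable level of
 * the processor to one.  The function returns with interrupts enabled and
 * thread dispatching enabled.
 */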
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  executing = cpu_self->executing;

  do {
    Thread_Control *heir;

    _Thread_Preemption_intervention( cpu_self );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /*
     * On SMP the complete context switch must be atomic with respect to one
     * processor.  See also _Thread_Handler() since _Context_Switch() may branch
     * to this function.
     */
#if !defined( RTEMS_SMP )
    _ISR_Local_enable( level );
#endif

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

#if !defined( RTEMS_SMP )
    _ISR_Local_disable( level );
#endif
  } while (
#if defined( RTEMS_SMP )
    false
#else
    cpu_self->dispatch_necessary
#endif
  );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  _Thread_Run_post_switch_actions( executing );
}

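/*
 * Performs a thread dispatch if it is necessary for this processor.  Both
 * the check of the dispatch necessary indicator and the dispatch itself
 * are carried out with interrupts disabled.
 */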
void _Thread_Dispatch( void )
{
  ISR_Level        level;
  Per_CPU_Control *cpu_self;

  _ISR_Local_disable( level );

  cpu_self = _Per_CPU_Get();

  if ( cpu_self->dispatch_necessary ) {
    _Profiling_Thread_dispatch_disable( cpu_self, 0 );
    cpu_self->thread_dispatch_disable_level = 1;
    _Thread_Do_dispatch( cpu_self, level );
  } else {
    _ISR_Local_enable( level );
  }
}
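
/*
 * Illustrative usage sketch, not part of this file: callers that must not
 * be preempted typically bracket their critical section with the thread
 * dispatch disable/enable pair declared in <rtems/score/threaddispatch.h>
 * (assuming that API here); the enable path then performs any dispatch
 * that became necessary in the meantime.
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   cpu_self = _Thread_Dispatch_disable();
 *   ... manipulate thread or scheduler state ...
 *   _Thread_Dispatch_enable( cpu_self );
 *
 * _Thread_Dispatch() above is used instead when thread dispatching is
 * currently enabled and the complete disable, dispatch, and enable
 * sequence has to be performed in one step.
 */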