1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief Dispatch Thread |
---|
5 | * @ingroup ScoreThread |
---|
6 | */ |
---|
7 | |
---|
8 | /* |
---|
9 | * COPYRIGHT (c) 1989-2009. |
---|
10 | * On-Line Applications Research Corporation (OAR). |
---|
11 | * |
---|
12 | * Copyright (c) 2014, 2016 embedded brains GmbH. |
---|
13 | * |
---|
14 | * The license and distribution terms for this file may be |
---|
15 | * found in the file LICENSE in this distribution or at |
---|
16 | * http://www.rtems.org/license/LICENSE. |
---|
17 | */ |
---|
18 | |
---|
19 | #if HAVE_CONFIG_H |
---|
20 | #include "config.h" |
---|
21 | #endif |
---|
22 | |
---|
23 | #include <rtems/score/threaddispatch.h> |
---|
24 | #include <rtems/score/assert.h> |
---|
25 | #include <rtems/score/isr.h> |
---|
26 | #include <rtems/score/schedulerimpl.h> |
---|
27 | #include <rtems/score/threadimpl.h> |
---|
28 | #include <rtems/score/todimpl.h> |
---|
29 | #include <rtems/score/userextimpl.h> |
---|
30 | #include <rtems/score/wkspace.h> |
---|
31 | #include <rtems/config.h> |
---|
32 | |
---|
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
/*
 * NOTE(review): presumably tracks the thread whose floating point context
 * is currently loaded in the FP unit (see the _Thread_Save_fp() and
 * _Thread_Restore_fp() calls around the context switch below) — confirm
 * against the per-CPU port documentation.
 */
Thread_Control *_Thread_Allocated_fp;
#endif

/* Statically initialized, initially empty list of the user extension
 * thread switch handlers consumed by _User_extensions_Thread_switch(). */
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
---|
38 | |
---|
39 | #if defined(RTEMS_SMP) |
---|
40 | static void _Thread_Ask_for_help( Thread_Control *the_thread ) |
---|
41 | { |
---|
42 | Chain_Node *node; |
---|
43 | const Chain_Node *tail; |
---|
44 | |
---|
45 | node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes ); |
---|
46 | tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes ); |
---|
47 | |
---|
48 | do { |
---|
49 | Scheduler_Node *scheduler_node; |
---|
50 | const Scheduler_Control *scheduler; |
---|
51 | ISR_lock_Context lock_context; |
---|
52 | bool success; |
---|
53 | |
---|
54 | scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); |
---|
55 | scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); |
---|
56 | |
---|
57 | _Scheduler_Acquire_critical( scheduler, &lock_context ); |
---|
58 | success = ( *scheduler->Operations.ask_for_help )( |
---|
59 | scheduler, |
---|
60 | the_thread, |
---|
61 | scheduler_node |
---|
62 | ); |
---|
63 | _Scheduler_Release_critical( scheduler, &lock_context ); |
---|
64 | |
---|
65 | if ( success ) { |
---|
66 | break; |
---|
67 | } |
---|
68 | |
---|
69 | node = _Chain_Next( node ); |
---|
70 | } while ( node != tail ); |
---|
71 | } |
---|
72 | |
---|
73 | static bool _Thread_Can_ask_for_help( const Thread_Control *executing ) |
---|
74 | { |
---|
75 | return executing->Scheduler.helping_nodes > 0 |
---|
76 | && _Thread_Is_ready( executing ); |
---|
77 | } |
---|
78 | #endif |
---|
79 | |
---|
/**
 * @brief Processes the threads queued on this processor as being in need
 *   of help before the heir selection takes place.
 *
 * On SMP configurations, drains the per-CPU Threads_in_need_for_help
 * chain, asking the schedulers of each queued thread for help.  On
 * uniprocessor configurations this is a no-op.
 *
 * @param cpu_self is the current processor.
 */
static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
{
#if defined(RTEMS_SMP)
  _Per_CPU_Acquire( cpu_self );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node *node;
    Thread_Control *the_thread;
    ISR_lock_Context lock_context;

    /* Detach the first queued help node and recover its thread. */
    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    _Chain_Set_off_chain( node );
    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );

    /*
     * NOTE(review): the per-CPU lock is released before the thread state
     * lock is acquired and reacquired afterwards — presumably to respect
     * the lock ordering between the two locks; confirm against the SMP
     * locking documentation.  The chain emptiness is therefore re-checked
     * on each iteration under the reacquired per-CPU lock.
     */
    _Per_CPU_Release( cpu_self );
    _Thread_State_acquire( the_thread, &lock_context );
    _Thread_Ask_for_help( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    _Per_CPU_Acquire( cpu_self );
  }

  _Per_CPU_Release( cpu_self );
#else
  (void) cpu_self;
#endif
}
---|
106 | |
---|
107 | static void _Thread_Post_switch_cleanup( Thread_Control *executing ) |
---|
108 | { |
---|
109 | #if defined(RTEMS_SMP) |
---|
110 | Chain_Node *node; |
---|
111 | const Chain_Node *tail; |
---|
112 | |
---|
113 | if ( !_Thread_Can_ask_for_help( executing ) ) { |
---|
114 | return; |
---|
115 | } |
---|
116 | |
---|
117 | node = _Chain_First( &executing->Scheduler.Scheduler_nodes ); |
---|
118 | tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes ); |
---|
119 | |
---|
120 | do { |
---|
121 | Scheduler_Node *scheduler_node; |
---|
122 | const Scheduler_Control *scheduler; |
---|
123 | ISR_lock_Context lock_context; |
---|
124 | |
---|
125 | scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); |
---|
126 | scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); |
---|
127 | |
---|
128 | _Scheduler_Acquire_critical( scheduler, &lock_context ); |
---|
129 | ( *scheduler->Operations.reconsider_help_request )( |
---|
130 | scheduler, |
---|
131 | executing, |
---|
132 | scheduler_node |
---|
133 | ); |
---|
134 | _Scheduler_Release_critical( scheduler, &lock_context ); |
---|
135 | |
---|
136 | node = _Chain_Next( node ); |
---|
137 | } while ( node != tail ); |
---|
138 | #else |
---|
139 | (void) executing; |
---|
140 | #endif |
---|
141 | } |
---|
142 | |
---|
143 | static Thread_Action *_Thread_Get_post_switch_action( |
---|
144 | Thread_Control *executing |
---|
145 | ) |
---|
146 | { |
---|
147 | Chain_Control *chain = &executing->Post_switch_actions.Chain; |
---|
148 | |
---|
149 | return (Thread_Action *) _Chain_Get_unprotected( chain ); |
---|
150 | } |
---|
151 | |
---|
152 | static void _Thread_Run_post_switch_actions( Thread_Control *executing ) |
---|
153 | { |
---|
154 | ISR_lock_Context lock_context; |
---|
155 | Thread_Action *action; |
---|
156 | |
---|
157 | _Thread_State_acquire( executing, &lock_context ); |
---|
158 | _Thread_Post_switch_cleanup( executing ); |
---|
159 | action = _Thread_Get_post_switch_action( executing ); |
---|
160 | |
---|
161 | while ( action != NULL ) { |
---|
162 | _Chain_Set_off_chain( &action->Node ); |
---|
163 | |
---|
164 | ( *action->handler )( executing, action, &lock_context ); |
---|
165 | |
---|
166 | _Thread_State_acquire( executing, &lock_context ); |
---|
167 | action = _Thread_Get_post_switch_action( executing ); |
---|
168 | } |
---|
169 | |
---|
170 | _Thread_State_release( executing, &lock_context ); |
---|
171 | } |
---|
172 | |
---|
/**
 * @brief Performs the thread dispatch: switches to the heir thread until
 *   no further dispatch is necessary, then runs the post switch actions
 *   of the thread that ends up executing.
 *
 * @param cpu_self is the current processor.  Its thread dispatch disable
 *   level must be exactly one (asserted on entry and before it is reset
 *   to zero on exit).
 *
 * @param level is the interrupt level captured by the caller via
 *   _ISR_Local_disable().  Interrupts are re-enabled across the actual
 *   context switch and again on exit.
 */
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

#if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
  /*
   * In the robust configuration it is a fatal error to dispatch while the
   * captured interrupt level has interrupts disabled (on SMP builds only
   * when SMP is actually enabled).
   */
  if (
    !_ISR_Is_enabled( level )
#if defined(RTEMS_SMP)
      && rtems_configuration_is_smp_enabled()
#endif
  ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
  }
#endif

  executing = cpu_self->executing;

  do {
    Thread_Control *heir;

    _Thread_Preemption_intervention( cpu_self );
    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
    /* Replenish the heir's time slice on a reset-timeslice budget. */
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /* The context switch itself runs with interrupts enabled. */
    _ISR_Local_enable( level );

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    /*
     * Control returns here only once this thread is dispatched again, so
     * "executing" still denotes the thread running at this point.
     */
    _Thread_Restore_fp( executing );

    /*
     *  We have to obtain this value again after the context switch since the
     *  heir thread may have migrated from another processor.  Values from the
     *  stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _ISR_Local_disable( level );
  } while ( cpu_self->dispatch_necessary );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Local_enable( level );

  /* Post switch actions run with thread dispatching enabled again. */
  _Thread_Run_post_switch_actions( executing );
}
---|
239 | |
---|
240 | void _Thread_Dispatch( void ) |
---|
241 | { |
---|
242 | ISR_Level level; |
---|
243 | Per_CPU_Control *cpu_self; |
---|
244 | |
---|
245 | _ISR_Local_disable( level ); |
---|
246 | |
---|
247 | cpu_self = _Per_CPU_Get(); |
---|
248 | |
---|
249 | if ( cpu_self->dispatch_necessary ) { |
---|
250 | _Profiling_Thread_dispatch_disable( cpu_self, 0 ); |
---|
251 | _Assert( cpu_self->thread_dispatch_disable_level == 0 ); |
---|
252 | cpu_self->thread_dispatch_disable_level = 1; |
---|
253 | _Thread_Do_dispatch( cpu_self, level ); |
---|
254 | } else { |
---|
255 | _ISR_Local_enable( level ); |
---|
256 | } |
---|
257 | } |
---|
258 | |
---|
/**
 * @brief Directly performs a thread dispatch on behalf of a caller that
 *   already holds a thread dispatch disable level of exactly one.
 *
 * It is a fatal error to call this with any other disable level.
 *
 * @param cpu_self is the current processor.
 */
void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self )
{
  ISR_Level level;

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
  }

  _ISR_Local_disable( level );
  _Thread_Do_dispatch( cpu_self, level );
}
---|