/**
 * @file
 *
 * @ingroup RTEMSScoreThread
 *
 * @brief This source file contains the implementation of
 *   _Thread_Initialize().
 */

/*
 * COPYRIGHT (c) 1989-2014.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
---|
18 | |
---|
19 | #ifdef HAVE_CONFIG_H |
---|
20 | #include "config.h" |
---|
21 | #endif |
---|
22 | |
---|
23 | #include <rtems/score/threadimpl.h> |
---|
24 | #include <rtems/score/freechainimpl.h> |
---|
25 | #include <rtems/score/schedulerimpl.h> |
---|
26 | #include <rtems/score/stackimpl.h> |
---|
27 | #include <rtems/score/tls.h> |
---|
28 | #include <rtems/score/userextimpl.h> |
---|
29 | #include <rtems/score/watchdogimpl.h> |
---|
30 | #include <rtems/config.h> |
---|
31 | |
---|
/*
 * Releases all resources held by a fully initialized thread and returns the
 * thread object to its object information block.  The teardown order below
 * is significant: the per-thread locks are destroyed only after the
 * subsystems that use them, and _Objects_Free() must come last.
 *
 * NOTE(review): assumes the caller holds the allocator mutex and that the
 * thread is no longer executing — TODO confirm against callers.
 */
void _Thread_Free(
  Thread_Information *information,
  Thread_Control     *the_thread
)
{
#if defined(RTEMS_SMP)
  Scheduler_Node *scheduler_node;
  size_t          scheduler_index;
#endif

  /* Run the delete user extensions, then tear down extension iterators
   * and the POSIX key lock set up by _Thread_Try_initialize(). */
  _User_extensions_Thread_delete( the_thread );
  _User_extensions_Destroy_iterators( the_thread );
  _ISR_lock_Destroy( &the_thread->Keys.Lock );

#if defined(RTEMS_SMP)
  /*
   * On SMP there is one scheduler node per configured scheduler, stored
   * contiguously at Scheduler.nodes; step through the array by the
   * per-node size and destroy each one.
   */
  scheduler_node = the_thread->Scheduler.nodes;
  scheduler_index = 0;

  while ( scheduler_index < _Scheduler_Count ) {
    _Scheduler_Node_destroy(
      &_Scheduler_Table[ scheduler_index ],
      scheduler_node
    );
    scheduler_node = (Scheduler_Node *)
      ( (uintptr_t) scheduler_node + _Scheduler_Node_size );
    ++scheduler_index;
  }
#else
  /* Uniprocessor: only the home scheduler node exists. */
  _Scheduler_Node_destroy(
    _Thread_Scheduler_get_home( the_thread ),
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif

  _ISR_lock_Destroy( &the_thread->Timer.Lock );

  /*
   * The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  /* With deferred FP switching, release the FP unit if this thread
   * currently owns it. */
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
#endif

  /* Return the spare thread queue heads to the free chain they were
   * popped from during initialization. */
  _Freechain_Push(
    &information->Thread_queue_heads.Free,
    the_thread->Wait.spare_heads
  );

  /*
   * Free the rest of the memory associated with this task
   * and set the associated pointers to NULL for safety.
   */
  /* The stack is released through the handler captured at creation time. */
  ( *the_thread->Start.stack_free )( the_thread->Start.Initial_stack.area );

#if defined(RTEMS_SMP)
  _ISR_lock_Destroy( &the_thread->Scheduler.Lock );
  _ISR_lock_Destroy( &the_thread->Wait.Lock.Default );
  _SMP_lock_Stats_destroy( &the_thread->Potpourri_stats );
#endif

  _Thread_queue_Destroy( &the_thread->Join_queue );
  _Context_Destroy( the_thread, &the_thread->Registers );
  /* Last step: hand the object back so its name/ID can be reused. */
  _Objects_Free( &information->Objects, &the_thread->Object );
}
---|
99 | |
---|
/*
 * Performs the full initialization of a thread control block: zeroes the
 * variable part of the object, links the add-on areas, carves the FP
 * context and TLS area out of the configured stack area, initializes the
 * scheduler node(s), priority aggregation, and bookkeeping, and finally
 * runs the create user extensions.
 *
 * Returns true on success.  The simple error checks are deliberately done
 * only after the thread is fully initialized so that a failure can be
 * cleaned up with a plain _Thread_Free() (see the comment near the end).
 */
static bool _Thread_Try_initialize(
  Thread_Information         *information,
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  uintptr_t                tls_size;
  size_t                   i;
  char                    *stack_begin;
  char                    *stack_end;
  uintptr_t                stack_align;
  Scheduler_Node          *scheduler_node;
#if defined(RTEMS_SMP)
  Scheduler_Node          *scheduler_node_for_index;
  const Scheduler_Control *scheduler_for_index;
  size_t                   scheduler_index;
#endif
  /* All threads start out associated with processor 0. */
  Per_CPU_Control *cpu = _Per_CPU_Get_by_index( 0 );

  /*
   * Zero everything from Join_queue to the end of the object; the fields
   * before Join_queue (e.g. the Objects information) are preserved.
   */
  memset(
    &the_thread->Join_queue,
    0,
    information->Objects.object_size - offsetof( Thread_Control, Join_queue )
  );

  /* Point each add-on destination pointer at its storage area, which is
   * located at a fixed offset inside this thread control block. */
  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /* Set up the properly aligned stack area begin and end */
  stack_begin = config->stack_area;
  stack_end = stack_begin + config->stack_size;
  stack_align = CPU_STACK_ALIGNMENT;
  stack_begin = (char *) RTEMS_ALIGN_UP( (uintptr_t) stack_begin, stack_align );
  stack_end = (char *) RTEMS_ALIGN_DOWN( (uintptr_t) stack_end, stack_align );

  /* Allocate floating-point context in stack area */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( config->is_fp ) {
    stack_end -= CONTEXT_FP_SIZE;
    the_thread->fp_context = (Context_Control_fp *) stack_end;
    the_thread->Start.fp_context = (Context_Control_fp *) stack_end;
  }
#endif

  /* NOTE(review): presumably the returned size already accounts for the
   * alignment rounding done below — confirm in _TLS_Get_allocation_size(). */
  tls_size = _TLS_Get_allocation_size();

  /* Allocate thread-local storage (TLS) area in stack area */
  if ( tls_size > 0 ) {
    uintptr_t tls_align;

    stack_end -= tls_size;
    tls_align = (uintptr_t) _TLS_Alignment;
    /* Round the TLS base up to the required TLS alignment. */
    the_thread->Start.tls_area = (void *)
      ( ( (uintptr_t) stack_end + tls_align - 1 ) & ~( tls_align - 1 ) );
  }

  /* The usable stack is whatever remains after the FP/TLS carve-outs. */
  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack_begin,
    stack_end - stack_begin
  );

  /*
   * Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Pop(
    &information->Thread_queue_heads.Free
  );
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   * General initialization
   */

  the_thread->is_fp                  = config->is_fp;
  the_thread->Start.isr_level        = config->isr_level;
  the_thread->Start.is_preemptible   = config->is_preemptible;
  the_thread->Start.budget_algorithm = config->budget_algorithm;
  the_thread->Start.budget_callout   = config->budget_callout;
  the_thread->Start.stack_free       = config->stack_free;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );

  switch ( config->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      /* No per-thread budget state required for these algorithms. */
      break;
#if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      the_thread->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
      break;
#endif
#if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      break;
#endif
  }

#if defined(RTEMS_SMP)
  /*
   * Initialize one scheduler node per configured scheduler; remember the
   * node belonging to the home scheduler (config->scheduler) so it can
   * seed the wait/scheduler node chains below.
   */
  scheduler_node           = NULL;
  scheduler_node_for_index = the_thread->Scheduler.nodes;
  scheduler_for_index      = &_Scheduler_Table[ 0 ];
  scheduler_index          = 0;

  while ( scheduler_index < _Scheduler_Count ) {
    Priority_Control priority_for_index;

    if ( scheduler_for_index == config->scheduler ) {
      priority_for_index = config->priority;
      scheduler_node = scheduler_node_for_index;
    } else {
      /*
       * Use the idle thread priority for the non-home scheduler instances by
       * default.
       */
      priority_for_index = _Scheduler_Map_priority(
        scheduler_for_index,
        scheduler_for_index->maximum_priority
      );
    }

    _Scheduler_Node_initialize(
      scheduler_for_index,
      scheduler_node_for_index,
      the_thread,
      priority_for_index
    );
    scheduler_node_for_index = (Scheduler_Node *)
      ( (uintptr_t) scheduler_node_for_index + _Scheduler_Node_size );
    ++scheduler_for_index;
    ++scheduler_index;
  }

  _Assert( scheduler_node != NULL );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &scheduler_node->Thread.Scheduler_node.Chain
  );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_initialize(
    config->scheduler,
    scheduler_node,
    the_thread,
    config->priority
  );
#endif

  /* The real priority seeds the priority aggregation of the home node. */
  _Priority_Node_initialize( &the_thread->Real_priority, config->priority );
  _Priority_Initialize_one(
    &scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  /* The memset() above left Scheduler.state == 0 == THREAD_SCHEDULER_BLOCKED. */
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.home_scheduler = config->scheduler;
  _ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
  _Processor_mask_Assign(
    &the_thread->Scheduler.Affinity,
    _SMP_Get_online_processors()
  );
  _ISR_lock_Initialize( &the_thread->Wait.Lock.Default, "Thread Wait Default" );
  _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  _RBTree_Initialize_node( &the_thread->Wait.Link.Registry_node );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
  _SMP_lock_Stats_initialize( &the_thread->Join_queue.Lock_stats, "Thread State" );
#endif

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.operations        = &_Thread_queue_Operations_default;
  the_thread->Start.initial_priority = config->priority;

  /* The memset() above also established the initial wait flags. */
  RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Objects_Open_u32( &information->Objects, &the_thread->Object, config->name );

  /*
   * We do following checks of simple error conditions after the thread is
   * fully initialized to simplify the clean up in case of an error.  With a
   * fully initialized thread we can simply use _Thread_Free() and do not have
   * to bother with partially initialized threads.
   */

#if defined(RTEMS_SMP)
  /* Reject non-preemptible threads on schedulers that cannot support them. */
  if (
    !config->is_preemptible
      && !_Scheduler_Is_non_preempt_mode_supported( config->scheduler )
  ) {
    return false;
  }
#endif

#if defined(RTEMS_SMP) || CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE
  /* A non-zero start ISR level is rejected where inter-processor
   * interrupts are needed (or robust thread dispatch is enforced). */
  if (
    config->isr_level != 0
#if CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    return false;
  }
#endif

  /*
   * We assume the Allocator Mutex is locked and dispatching is
   * enabled when we get here.  We want to be able to run the
   * user extensions with dispatching enabled.  The Allocator
   * Mutex provides sufficient protection to let the user extensions
   * run safely.
   */
  return _User_extensions_Thread_create( the_thread );
}
---|
331 | |
---|
332 | Status_Control _Thread_Initialize( |
---|
333 | Thread_Information *information, |
---|
334 | Thread_Control *the_thread, |
---|
335 | const Thread_Configuration *config |
---|
336 | ) |
---|
337 | { |
---|
338 | bool ok; |
---|
339 | |
---|
340 | ok = _Thread_Try_initialize( information, the_thread, config ); |
---|
341 | |
---|
342 | if ( !ok ) { |
---|
343 | _Objects_Close( &information->Objects, &the_thread->Object ); |
---|
344 | _Thread_Free( information, the_thread ); |
---|
345 | |
---|
346 | return STATUS_UNSATISFIED; |
---|
347 | } |
---|
348 | |
---|
349 | return STATUS_SUCCESSFUL; |
---|
350 | } |
---|