source: rtems/cpukit/score/src/threadinitialize.c

Last change on this file was 45393975, checked in by Sebastian Huber <sebastian.huber@…>, on 01/28/23 at 09:56:38

score: Help static analysis in thread init

Add an assert to _Thread_Initialize_scheduler_and_wait_nodes() which may
help a static analyzer. Use a do/while loop since we have at least one
scheduler.

Update #4832.

  • Property mode set to 100644
File size: 11.7 KB
Line 
1/* SPDX-License-Identifier: BSD-2-Clause */
2
3/**
4 * @file
5 *
6 * @ingroup RTEMSScoreThread
7 *
8 * @brief This source file contains the implementation of
9 *   _Thread_Initialize().
10 */
11
12/*
13 *  COPYRIGHT (c) 1989-2014.
14 *  On-Line Applications Research Corporation (OAR).
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 *    notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *    notice, this list of conditions and the following disclaimer in the
23 *    documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#ifdef HAVE_CONFIG_H
39#include "config.h"
40#endif
41
42#include <rtems/score/threadimpl.h>
43#include <rtems/score/freechainimpl.h>
44#include <rtems/score/schedulerimpl.h>
45#include <rtems/score/stackimpl.h>
46#include <rtems/score/tls.h>
47#include <rtems/score/userextimpl.h>
48#include <rtems/score/watchdogimpl.h>
49
50void _Thread_Free(
51  Thread_Information *information,
52  Thread_Control     *the_thread
53)
54{
55#if defined(RTEMS_SMP)
56  Scheduler_Node *scheduler_node;
57  size_t          scheduler_index;
58#endif
59
60  _User_extensions_Thread_delete( the_thread );
61  _User_extensions_Destroy_iterators( the_thread );
62  _ISR_lock_Destroy( &the_thread->Keys.Lock );
63
64#if defined(RTEMS_SMP)
65  scheduler_node = the_thread->Scheduler.nodes;
66  scheduler_index = 0;
67
68  while ( scheduler_index < _Scheduler_Count ) {
69    _Scheduler_Node_destroy(
70      &_Scheduler_Table[ scheduler_index ],
71      scheduler_node
72    );
73    scheduler_node = (Scheduler_Node *)
74      ( (uintptr_t) scheduler_node + _Scheduler_Node_size );
75    ++scheduler_index;
76  }
77#else
78  _Scheduler_Node_destroy(
79    _Thread_Scheduler_get_home( the_thread ),
80    _Thread_Scheduler_get_home_node( the_thread )
81  );
82#endif
83
84  _ISR_lock_Destroy( &the_thread->Timer.Lock );
85
86  /*
87   *  The thread might have been FP.  So deal with that.
88   */
89#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
90#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
91  if ( _Thread_Is_allocated_fp( the_thread ) )
92    _Thread_Deallocate_fp();
93#endif
94#endif
95
96  _Freechain_Push(
97    &information->Thread_queue_heads.Free,
98    the_thread->Wait.spare_heads
99  );
100
101  /*
102   *  Free the rest of the memory associated with this task
103   *  and set the associated pointers to NULL for safety.
104   */
105  ( *the_thread->Start.stack_free )( the_thread->Start.Initial_stack.area );
106
107#if defined(RTEMS_SMP)
108  _ISR_lock_Destroy( &the_thread->Scheduler.Lock );
109  _ISR_lock_Destroy( &the_thread->Wait.Lock.Default );
110  _SMP_lock_Stats_destroy( &the_thread->Potpourri_stats );
111#endif
112
113  _Thread_queue_Destroy( &the_thread->Join_queue );
114  _Context_Destroy( the_thread, &the_thread->Registers );
115  _Objects_Free( &information->Objects, &the_thread->Object );
116}
117
/*
 * Initializes the scheduler node(s) of the thread and sets up the thread's
 * priority aggregation.  Afterwards the thread uses exactly one scheduler
 * node, the one provided by its home scheduler (config->scheduler), with the
 * configured priority installed as the real priority.
 */
static void _Thread_Initialize_scheduler_and_wait_nodes(
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  Scheduler_Node          *home_scheduler_node;
#if defined(RTEMS_SMP)
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  size_t                   scheduler_index;
#endif

#if defined(RTEMS_SMP)
  home_scheduler_node = NULL;
  scheduler_node = the_thread->Scheduler.nodes;
  scheduler = &_Scheduler_Table[ 0 ];
  scheduler_index = 0;

  /*
   * In SMP configurations, the thread has exactly one scheduler node for each
   * configured scheduler.  Initialize the scheduler nodes of each scheduler.
   * The application configuration ensures that we have at least one scheduler
   * configured.
   */

  _Assert ( _Scheduler_Count >= 1 );

  /*
   * The do/while form makes the "at least one iteration" guarantee explicit,
   * which also helps static analysis prove that home_scheduler_node is set.
   */
  do {
    Priority_Control priority;

    if ( scheduler == config->scheduler ) {
      priority = config->priority;
      home_scheduler_node = scheduler_node;
    } else {
      /*
       * Use the idle thread priority for the non-home scheduler instances by
       * default.
       */
      priority = _Scheduler_Map_priority(
        scheduler,
        scheduler->maximum_priority
      );
    }

    _Scheduler_Node_initialize(
      scheduler,
      scheduler_node,
      the_thread,
      priority
    );

    /*
     * Since the size of a scheduler node depends on the application
     * configuration, the _Scheduler_Node_size constant is used to get the next
     * scheduler node.  Using sizeof( Scheduler_Node ) would be wrong.
     */
    scheduler_node = (Scheduler_Node *)
      ( (uintptr_t) scheduler_node + _Scheduler_Node_size );
    ++scheduler;
    ++scheduler_index;
  } while ( scheduler_index < _Scheduler_Count );

  /*
   * The thread is initialized to use exactly one scheduler node which is
   * provided by its home scheduler.
   */
  _Assert( home_scheduler_node != NULL );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &home_scheduler_node->Thread.Wait_node
  );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &home_scheduler_node->Thread.Scheduler_node.Chain
  );
#else
  /*
   * In uniprocessor configurations, the thread has exactly one scheduler node.
   */
  home_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_initialize(
    config->scheduler,
    home_scheduler_node,
    the_thread,
    config->priority
  );
#endif

  /*
   * The current priority of the thread is initialized to exactly the real
   * priority of the thread.  During the lifetime of the thread, it may gain
   * more priority nodes, for example through locking protocols such as
   * priority inheritance or priority ceiling.
   */
  _Priority_Node_initialize( &the_thread->Real_priority, config->priority );
  _Priority_Initialize_one(
    &home_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  /*
   * The memset() in _Thread_Try_initialize() cleared the scheduler state,
   * so it must correspond to THREAD_SCHEDULER_BLOCKED.
   */
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.home_scheduler = config->scheduler;
  _ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
  _ISR_lock_Initialize( &the_thread->Wait.Lock.Default, "Thread Wait Default" );
  _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  _RBTree_Initialize_node( &the_thread->Wait.Link.Registry_node );
#endif
}
227
/*
 * Performs the actual thread initialization: clears the variable part of the
 * thread control block, wires up the add-on areas, carves the optional
 * floating-point context and thread-local storage out of the stack area,
 * initializes the scheduler and wait nodes, and finally runs the thread
 * create user extensions.
 *
 * Returns true if the thread was fully initialized, otherwise false.  Simple
 * error conditions are deliberately checked only after the thread is fully
 * initialized, so that a failed thread can always be cleaned up with a plain
 * _Thread_Free().
 */
static bool _Thread_Try_initialize(
  Thread_Information         *information,
  Thread_Control             *the_thread,
  const Thread_Configuration *config
)
{
  uintptr_t                tls_size;
  size_t                   i;
  char                    *stack_begin;
  char                    *stack_end;
  uintptr_t                stack_align;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

  /*
   * Zero everything from Join_queue to the end of the object, i.e. the whole
   * variable part of the thread control block.
   */
  memset(
    &the_thread->Join_queue,
    0,
    information->Objects.object_size - offsetof( Thread_Control, Join_queue )
  );

  /* Let each configured add-on point into its area inside the object */
  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /* Set up the properly aligned stack area begin and end */
  stack_begin = config->stack_area;
  stack_end = stack_begin + config->stack_size;
  stack_align = CPU_STACK_ALIGNMENT;
  stack_end = (char *) RTEMS_ALIGN_DOWN( (uintptr_t) stack_end, stack_align );

  /* Allocate floating-point context in stack area */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( config->is_fp ) {
    stack_end -= CONTEXT_FP_SIZE;
    the_thread->fp_context = (Context_Control_fp *) stack_end;
    the_thread->Start.fp_context = (Context_Control_fp *) stack_end;
  }
#endif

  tls_size = _TLS_Get_allocation_size();

  /* Allocate thread-local storage (TLS) area in stack area */
  if ( tls_size > 0 ) {
    stack_end -= tls_size;
    the_thread->Start.tls_area = stack_end;
  }

  /* The remaining [stack_begin, stack_end) region is the actual stack */
  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack_begin,
    stack_end - stack_begin
  );

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Pop(
    &information->Thread_queue_heads.Free
  );
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */

  the_thread->is_fp                       = config->is_fp;
  the_thread->Start.isr_level             = config->isr_level;
  the_thread->Start.is_preemptible        = config->is_preemptible;
  the_thread->Start.cpu_budget_operations = config->cpu_budget_operations;
  the_thread->Start.stack_free            = config->stack_free;
  the_thread->Join_queue.Queue.owner      = the_thread;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );
  _Thread_Initialize_scheduler_and_wait_nodes( the_thread, config );

#if defined(RTEMS_SMP)
  _Processor_mask_Assign(
    &the_thread->Scheduler.Affinity,
    _SMP_Get_online_processors()
   );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
  _SMP_lock_Stats_initialize( &the_thread->Join_queue.Lock_stats, "Thread State" );
#endif

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.operations         = &_Thread_queue_Operations_default;
  the_thread->Start.initial_priority  = config->priority;

  /* The memset() above cleared the wait flags; that must mean READY */
  RTEMS_STATIC_ASSERT( THREAD_WAIT_STATE_READY == 0, Wait_flags );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Objects_Open_u32( &information->Objects, &the_thread->Object, config->name );

  /*
   * We do following checks of simple error conditions after the thread is
   * fully initialized to simplify the clean up in case of an error.  With a
   * fully initialized thread we can simply use _Thread_Free() and do not have
   * to bother with partially initialized threads.
   */

#if defined(RTEMS_SMP)
  if (
    !config->is_preemptible
      && !_Scheduler_Is_non_preempt_mode_supported( config->scheduler )
  ) {
    return false;
  }
#endif

#if defined(RTEMS_SMP) || CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE
  if (
    config->isr_level != 0
#if CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
      && _SMP_Need_inter_processor_interrupts()
#endif
  ) {
    return false;
  }
#endif

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  return _User_extensions_Thread_create( the_thread );
}
367
368Status_Control _Thread_Initialize(
369  Thread_Information         *information,
370  Thread_Control             *the_thread,
371  const Thread_Configuration *config
372)
373{
374  bool ok;
375
376  ok = _Thread_Try_initialize( information, the_thread, config );
377
378  if ( !ok ) {
379    _Objects_Close( &information->Objects, &the_thread->Object );
380    _Thread_Free( information, the_thread );
381
382    return STATUS_UNSATISFIED;
383  }
384
385  return STATUS_SUCCESSFUL;
386}
Note: See TracBrowser for help on using the repository browser.