[5618c37a] | 1 | /** |
---|
| 2 | * @file |
---|
| 3 | * |
---|
| 4 | * @brief Inlined Routines from the Thread Handler |
---|
| 5 | * |
---|
| 6 | * This file contains the macro implementation of the inlined |
---|
| 7 | * routines from the Thread handler. |
---|
| 8 | */ |
---|
| 9 | |
---|
| 10 | /* |
---|
| 11 | * COPYRIGHT (c) 1989-2008. |
---|
| 12 | * On-Line Applications Research Corporation (OAR). |
---|
| 13 | * |
---|
[1b1be254] | 14 | * Copyright (c) 2014 embedded brains GmbH. |
---|
| 15 | * |
---|
[5618c37a] | 16 | * The license and distribution terms for this file may be |
---|
| 17 | * found in the file LICENSE in this distribution or at |
---|
[c499856] | 18 | * http://www.rtems.org/license/LICENSE. |
---|
[5618c37a] | 19 | */ |
---|
| 20 | |
---|
| 21 | #ifndef _RTEMS_SCORE_THREADIMPL_H |
---|
| 22 | #define _RTEMS_SCORE_THREADIMPL_H |
---|
| 23 | |
---|
| 24 | #include <rtems/score/thread.h> |
---|
[0dd732d] | 25 | #include <rtems/score/chainimpl.h> |
---|
[514705d] | 26 | #include <rtems/score/interr.h> |
---|
[6c0e43d] | 27 | #include <rtems/score/isr.h> |
---|
[a2e3f33] | 28 | #include <rtems/score/objectimpl.h> |
---|
[3045738] | 29 | #include <rtems/score/resourceimpl.h> |
---|
[fe6c170c] | 30 | #include <rtems/score/statesimpl.h> |
---|
[16a41cc] | 31 | #include <rtems/score/sysstate.h> |
---|
[f031df0e] | 32 | #include <rtems/score/todimpl.h> |
---|
[16a41cc] | 33 | #include <rtems/config.h> |
---|
[5618c37a] | 34 | |
---|
| 35 | #ifdef __cplusplus |
---|
| 36 | extern "C" { |
---|
| 37 | #endif |
---|
| 38 | |
---|
| 39 | /** |
---|
| 40 | * @addtogroup ScoreThread |
---|
| 41 | */ |
---|
| 42 | /**@{**/ |
---|
| 43 | |
---|
| 44 | /** |
---|
| 45 | * The following structure contains the information necessary to manage |
---|
| 46 | * a thread which is waiting for a resource. |
---|
| 47 | */ |
---|
| 48 | #define THREAD_STATUS_PROXY_BLOCKING 0x1111111 |
---|
| 49 | |
---|
| 50 | /** |
---|
| 51 | * Self for the GNU Ada Run-Time |
---|
| 52 | */ |
---|
| 53 | SCORE_EXTERN void *rtems_ada_self; |
---|
| 54 | |
---|
| 55 | /** |
---|
| 56 | * The following defines the information control block used to |
---|
| 57 | * manage this class of objects. |
---|
| 58 | */ |
---|
| 59 | SCORE_EXTERN Objects_Information _Thread_Internal_information; |
---|
| 60 | |
---|
| 61 | /** |
---|
| 62 | * The following points to the thread whose floating point |
---|
| 63 | * context is currently loaded. |
---|
| 64 | */ |
---|
| 65 | #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) |
---|
| 66 | SCORE_EXTERN Thread_Control *_Thread_Allocated_fp; |
---|
| 67 | #endif |
---|
| 68 | |
---|
| 69 | #if !defined(__DYNAMIC_REENT__) |
---|
| 70 | /** |
---|
| 71 | * The C library reentrant global pointer. Some C library implementations |
---|
| 72 | * such as newlib have a single global pointer that is changed during a context |
---|
| 73 | * switch. The pointer points to that global pointer. The Thread control block |
---|
| 74 | * holds a pointer to the task specific data. |
---|
| 75 | */ |
---|
| 76 | SCORE_EXTERN struct _reent **_Thread_libc_reent; |
---|
| 77 | #endif |
---|
| 78 | |
---|
[40dcafa] | 79 | #define THREAD_RBTREE_NODE_TO_THREAD( node ) \ |
---|
| 80 | RTEMS_CONTAINER_OF( node, Thread_Control, RBNode ) |
---|
| 81 | |
---|
| 82 | #if defined(RTEMS_SMP) |
---|
| 83 | #define THREAD_RESOURCE_NODE_TO_THREAD( node ) \ |
---|
| 84 | RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node ) |
---|
| 85 | #endif |
---|
| 86 | |
---|
[5618c37a] | 87 | /** |
---|
| 88 | * @brief Initialize thread handler. |
---|
| 89 | * |
---|
| 90 | * This routine performs the initialization necessary for this handler. |
---|
| 91 | */ |
---|
| 92 | void _Thread_Handler_initialization(void); |
---|
| 93 | |
---|
| 94 | /** |
---|
| 95 | * @brief Create idle thread. |
---|
| 96 | * |
---|
| 97 | * This routine creates the idle thread. |
---|
| 98 | * |
---|
| 99 | * @warning No thread should be created before this one. |
---|
| 100 | */ |
---|
| 101 | void _Thread_Create_idle(void); |
---|
| 102 | |
---|
| 103 | /** |
---|
| 104 | * @brief Start thread multitasking. |
---|
| 105 | * |
---|
| 106 | * This routine initiates multitasking. It is invoked only as |
---|
| 107 | * part of initialization and its invocation is the last act of |
---|
| 108 | * the non-multitasking part of the system initialization. |
---|
| 109 | */ |
---|
[514705d] | 110 | void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE; |
---|
[5618c37a] | 111 | |
---|
| 112 | /** |
---|
| 113 | * @brief Allocate the requested stack space for the thread. |
---|
| 114 | * |
---|
| 115 | * Allocate the requested stack space for the thread. |
---|
| 116 | * Set the Start.stack field to the address of the stack. |
---|
| 117 | * |
---|
| 118 | * @param[in] the_thread is the thread where the stack space is requested |
---|
| 119 | * |
---|
| 120 | * @retval actual size allocated after any adjustment |
---|
| 121 | * @retval zero if the allocation failed |
---|
| 122 | */ |
---|
| 123 | size_t _Thread_Stack_Allocate( |
---|
| 124 | Thread_Control *the_thread, |
---|
| 125 | size_t stack_size |
---|
| 126 | ); |
---|
| 127 | |
---|
| 128 | /** |
---|
| 129 | * @brief Deallocate thread stack. |
---|
| 130 | * |
---|
| 131 | * Deallocate the Thread's stack. |
---|
| 132 | */ |
---|
| 133 | void _Thread_Stack_Free( |
---|
| 134 | Thread_Control *the_thread |
---|
| 135 | ); |
---|
| 136 | |
---|
| 137 | /** |
---|
| 138 | * @brief Initialize thread. |
---|
| 139 | * |
---|
| 140 | * This routine initializes the specified the thread. It allocates |
---|
| 141 | * all memory associated with this thread. It completes by adding |
---|
| 142 | * the thread to the local object table so operations on this |
---|
| 143 | * thread id are allowed. |
---|
| 144 | * |
---|
| 145 | * @note If stack_area is NULL, it is allocated from the workspace. |
---|
| 146 | * |
---|
| 147 | * @note If the stack is allocated from the workspace, then it is |
---|
| 148 | * guaranteed to be of at least minimum size. |
---|
| 149 | */ |
---|
| 150 | bool _Thread_Initialize( |
---|
| 151 | Objects_Information *information, |
---|
| 152 | Thread_Control *the_thread, |
---|
[c5831a3f] | 153 | const struct Scheduler_Control *scheduler, |
---|
[5618c37a] | 154 | void *stack_area, |
---|
| 155 | size_t stack_size, |
---|
| 156 | bool is_fp, |
---|
| 157 | Priority_Control priority, |
---|
| 158 | bool is_preemptible, |
---|
| 159 | Thread_CPU_budget_algorithms budget_algorithm, |
---|
| 160 | Thread_CPU_budget_algorithm_callout budget_callout, |
---|
| 161 | uint32_t isr_level, |
---|
| 162 | Objects_Name name |
---|
| 163 | ); |
---|
| 164 | |
---|
| 165 | /** |
---|
| 166 | * @brief Initializes thread and executes it. |
---|
| 167 | * |
---|
| 168 | * This routine initializes the executable information for a thread |
---|
| 169 | * and makes it ready to execute. After this routine executes, the |
---|
| 170 | * thread competes with all other threads for CPU time. |
---|
| 171 | * |
---|
| 172 | * @param the_thread is the thread to be initialized |
---|
| 173 | * @param the_prototype |
---|
| 174 | * @param entry_point |
---|
| 175 | * @param pointer_argument |
---|
| 176 | * @param numeric_argument |
---|
[24934e36] | 177 | * @param[in,out] cpu The processor if used to start an idle thread |
---|
[5618c37a] | 178 | * during system initialization. Must be set to @c NULL to start a normal |
---|
| 179 | * thread. |
---|
| 180 | */ |
---|
| 181 | bool _Thread_Start( |
---|
| 182 | Thread_Control *the_thread, |
---|
| 183 | Thread_Start_types the_prototype, |
---|
| 184 | void *entry_point, |
---|
| 185 | void *pointer_argument, |
---|
| 186 | Thread_Entry_numeric_type numeric_argument, |
---|
[24934e36] | 187 | Per_CPU_Control *cpu |
---|
[5618c37a] | 188 | ); |
---|
| 189 | |
---|
| 190 | bool _Thread_Restart( |
---|
| 191 | Thread_Control *the_thread, |
---|
[8690b53] | 192 | Thread_Control *executing, |
---|
[5618c37a] | 193 | void *pointer_argument, |
---|
| 194 | Thread_Entry_numeric_type numeric_argument |
---|
| 195 | ); |
---|
| 196 | |
---|
[701dd96f] | 197 | void _Thread_Yield( Thread_Control *executing ); |
---|
| 198 | |
---|
[1b1be254] | 199 | bool _Thread_Set_life_protection( bool protect ); |
---|
| 200 | |
---|
[5c731a83] | 201 | void _Thread_Life_action_handler( |
---|
| 202 | Thread_Control *executing, |
---|
| 203 | Thread_Action *action, |
---|
| 204 | Per_CPU_Control *cpu, |
---|
| 205 | ISR_Level level |
---|
| 206 | ); |
---|
| 207 | |
---|
[5618c37a] | 208 | /** |
---|
[1b1be254] | 209 | * @brief Kills all zombie threads in the system. |
---|
[5618c37a] | 210 | * |
---|
[1b1be254] | 211 | * Threads change into the zombie state as the last step in the thread |
---|
| 212 | * termination sequence right before a context switch to the heir thread is |
---|
| 213 | * initiated. Since the thread stack is still in use during this phase we have |
---|
| 214 | * to postpone the thread stack reclamation until this point. On SMP |
---|
| 215 | * configurations we may have to busy wait for context switch completion here. |
---|
[5618c37a] | 216 | */ |
---|
[1b1be254] | 217 | void _Thread_Kill_zombies( void ); |
---|
| 218 | |
---|
| 219 | /** |
---|
| 220 | * @brief Closes the thread. |
---|
| 221 | * |
---|
| 222 | * Closes the thread object and starts the thread termination sequence. In |
---|
| 223 | * case the executing thread is not terminated, then this function waits until |
---|
| 224 | * the terminating thread reached the zombie state. |
---|
| 225 | */ |
---|
| 226 | void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing ); |
---|
[5618c37a] | 227 | |
---|
| 228 | /** |
---|
| 229 | * @brief Removes any set states for @a the_thread. |
---|
| 230 | * |
---|
| 231 | * This routine removes any set states for @a the_thread. It performs |
---|
| 232 | * any necessary scheduling operations including the selection of |
---|
| 233 | * a new heir thread. |
---|
| 234 | * |
---|
| 235 | * - INTERRUPT LATENCY: |
---|
| 236 | * + ready chain |
---|
| 237 | * + select heir |
---|
| 238 | */ |
---|
| 239 | void _Thread_Ready( |
---|
| 240 | Thread_Control *the_thread |
---|
| 241 | ); |
---|
| 242 | |
---|
| 243 | /** |
---|
| 244 | * @brief Clears the indicated STATES for @a the_thread. |
---|
| 245 | * |
---|
| 246 | * This routine clears the indicated STATES for @a the_thread. It performs |
---|
| 247 | * any necessary scheduling operations including the selection of |
---|
| 248 | * a new heir thread. |
---|
| 249 | * |
---|
| 250 | * - INTERRUPT LATENCY: |
---|
| 251 | * + priority map |
---|
| 252 | * + select heir |
---|
| 253 | */ |
---|
| 254 | void _Thread_Clear_state( |
---|
| 255 | Thread_Control *the_thread, |
---|
| 256 | States_Control state |
---|
| 257 | ); |
---|
| 258 | |
---|
| 259 | /** |
---|
| 260 | * @brief Sets the indicated @a state for @a the_thread. |
---|
| 261 | * |
---|
| 262 | * This routine sets the indicated @a state for @a the_thread. It performs |
---|
| 263 | * any necessary scheduling operations including the selection of |
---|
| 264 | * a new heir thread. |
---|
| 265 | * |
---|
| 266 | * @param[in] the_thread is the thread to set the state for. |
---|
| 267 | * @param[in] state is the state to set the_thread to. |
---|
| 268 | * |
---|
| 269 | * - INTERRUPT LATENCY: |
---|
| 270 | * + ready chain |
---|
| 271 | * + select map |
---|
| 272 | */ |
---|
| 273 | void _Thread_Set_state( |
---|
| 274 | Thread_Control *the_thread, |
---|
| 275 | States_Control state |
---|
| 276 | ); |
---|
| 277 | |
---|
| 278 | /** |
---|
| 279 | * @brief Initializes the environment for a thread. |
---|
| 280 | * |
---|
| 281 | * This routine initializes the context of @a the_thread to its |
---|
| 282 | * appropriate starting state. |
---|
| 283 | * |
---|
| 284 | * @param[in] the_thread is the pointer to the thread control block. |
---|
| 285 | */ |
---|
| 286 | void _Thread_Load_environment( |
---|
| 287 | Thread_Control *the_thread |
---|
| 288 | ); |
---|
| 289 | |
---|
| 290 | /** |
---|
| 291 | * @brief Wrapper function for all threads. |
---|
| 292 | * |
---|
| 293 | * This routine is the wrapper function for all threads. It is |
---|
| 294 | * the starting point for all threads. The user provided thread |
---|
| 295 | * entry point is invoked by this routine. Operations |
---|
| 296 | * which must be performed immediately before and after the user's |
---|
| 297 | * thread executes are found here. |
---|
| 298 | * |
---|
| 299 | * @note On entry, it is assumed all interrupts are blocked and that this |
---|
| 300 | * routine needs to set the initial isr level. This may or may not |
---|
| 301 | * actually be needed by the context switch routine and as a result |
---|
| 302 | * interrupts may already be at their proper level. Either way, |
---|
| 303 | * setting the initial isr level properly here is safe. |
---|
| 304 | */ |
---|
| 305 | void _Thread_Handler( void ); |
---|
| 306 | |
---|
[a38ced2] | 307 | /** |
---|
| 308 | * @brief Executes the global constructors and then restarts itself as the |
---|
| 309 | * first initialization thread. |
---|
| 310 | * |
---|
| 311 | * The first initialization thread is the first RTEMS initialization task or |
---|
| 312 | * the first POSIX initialization thread in case no RTEMS initialization tasks |
---|
| 313 | * are present. |
---|
| 314 | */ |
---|
| 315 | void *_Thread_Global_construction( void ); |
---|
| 316 | |
---|
[5618c37a] | 317 | /** |
---|
| 318 | * @brief Ends the delay of a thread. |
---|
| 319 | * |
---|
| 320 | * This routine is invoked when a thread must be unblocked at the |
---|
| 321 | * end of a time based delay (i.e. wake after or wake when). |
---|
| 322 | * It is called by the watchdog handler. |
---|
| 323 | * |
---|
| 324 | * @param[in] id is the thread id |
---|
| 325 | */ |
---|
| 326 | void _Thread_Delay_ended( |
---|
| 327 | Objects_Id id, |
---|
| 328 | void *ignored |
---|
| 329 | ); |
---|
| 330 | |
---|
| 331 | /** |
---|
| 332 | * @brief Change the priority of a thread. |
---|
| 333 | * |
---|
| 334 | * This routine changes the current priority of @a the_thread to |
---|
| 335 | * @a new_priority. It performs any necessary scheduling operations |
---|
| 336 | * including the selection of a new heir thread. |
---|
| 337 | * |
---|
| 338 | * @param[in] the_thread is the thread to change |
---|
| 339 | * @param[in] new_priority is the priority to set @a the_thread to |
---|
| 340 | * @param[in] prepend_it is a switch to prepend the thread |
---|
| 341 | */ |
---|
| 342 | void _Thread_Change_priority ( |
---|
| 343 | Thread_Control *the_thread, |
---|
| 344 | Priority_Control new_priority, |
---|
| 345 | bool prepend_it |
---|
| 346 | ); |
---|
| 347 | |
---|
| 348 | /** |
---|
| 349 | * @brief Set thread priority. |
---|
| 350 | * |
---|
| 351 | * This routine updates the priority related fields in the_thread |
---|
| 352 | * control block to indicate the current priority is now new_priority. |
---|
| 353 | */ |
---|
| 354 | void _Thread_Set_priority( |
---|
| 355 | Thread_Control *the_thread, |
---|
| 356 | Priority_Control new_priority |
---|
| 357 | ); |
---|
| 358 | |
---|
| 359 | /** |
---|
| 360 | * This routine updates the related suspend fields in the_thread |
---|
| 361 | * control block to indicate the current nested level. |
---|
| 362 | */ |
---|
| 363 | #define _Thread_Suspend( _the_thread ) \ |
---|
| 364 | _Thread_Set_state( _the_thread, STATES_SUSPENDED ) |
---|
| 365 | |
---|
| 366 | /** |
---|
| 367 | * This routine updates the related suspend fields in the_thread |
---|
| 368 | * control block to indicate the current nested level. A force |
---|
| 369 | * parameter of true will force a resume and clear the suspend count. |
---|
| 370 | */ |
---|
| 371 | #define _Thread_Resume( _the_thread ) \ |
---|
| 372 | _Thread_Clear_state( _the_thread, STATES_SUSPENDED ) |
---|
| 373 | |
---|
| 374 | /** |
---|
| 375 | * @brief Maps thread Id to a TCB pointer. |
---|
| 376 | * |
---|
| 377 | * This function maps thread IDs to thread control |
---|
| 378 | * blocks. If ID corresponds to a local thread, then it |
---|
| 379 | * returns the_thread control pointer which maps to ID |
---|
| 380 | * and @a location is set to OBJECTS_LOCAL. If the thread ID is |
---|
| 381 | * global and resides on a remote node, then location is set |
---|
| 382 | * to OBJECTS_REMOTE, and the_thread is undefined. |
---|
| 383 | * Otherwise, location is set to OBJECTS_ERROR and |
---|
| 384 | * the_thread is undefined. |
---|
| 385 | * |
---|
| 386 | * @param[in] id is the id of the thread. |
---|
| 387 | * @param[in] location is the location of the block. |
---|
| 388 | * |
---|
| 389 | * @note The performance of many RTEMS services depends upon |
---|
| 390 | * the quick execution of the "good object" path in this |
---|
| 391 | * routine. If there is a possibility of saving a few |
---|
| 392 | * cycles off the execution time, this routine is worth |
---|
| 393 | * further optimization attention. |
---|
| 394 | */ |
---|
| 395 | Thread_Control *_Thread_Get ( |
---|
| 396 | Objects_Id id, |
---|
| 397 | Objects_Locations *location |
---|
| 398 | ); |
---|
| 399 | |
---|
[5b393fa5] | 400 | /** |
---|
| 401 | * @brief Acquires a thread by its identifier. |
---|
| 402 | * |
---|
| 403 | * @see _Objects_Acquire(). |
---|
| 404 | */ |
---|
| 405 | Thread_Control *_Thread_Acquire( |
---|
| 406 | Objects_Id id, |
---|
| 407 | Objects_Locations *location, |
---|
| 408 | ISR_lock_Context *lock_context |
---|
| 409 | ); |
---|
| 410 | |
---|
| 411 | /** |
---|
| 412 | * @brief Acquires the executing thread. |
---|
| 413 | * |
---|
| 414 | * @see _Objects_Acquire(). |
---|
| 415 | */ |
---|
| 416 | Thread_Control *_Thread_Acquire_executing( ISR_lock_Context *lock_context ); |
---|
| 417 | |
---|
[5618c37a] | 418 | /** |
---|
| 419 | * @brief Cancel a blocking operation due to ISR. |
---|
| 420 | * |
---|
| 421 | * This method is used to cancel a blocking operation that was |
---|
| 422 | * satisfied from an ISR while the thread executing was in the |
---|
| 423 | * process of blocking. |
---|
| 424 | * |
---|
| 425 | * This method will restore the previous ISR disable level during the cancel |
---|
| 426 | * operation. Thus it is an implicit _ISR_Enable(). |
---|
| 427 | * |
---|
| 428 | * @param[in] sync_state is the synchronization state |
---|
| 429 | * @param[in] the_thread is the thread whose blocking is canceled |
---|
| 430 | * @param[in] level is the previous ISR disable level |
---|
| 431 | * |
---|
| 432 | * @note This is a rare routine in RTEMS. It is called with |
---|
| 433 | * interrupts disabled and only when an ISR completed |
---|
| 434 | * a blocking condition in process. |
---|
| 435 | */ |
---|
| 436 | void _Thread_blocking_operation_Cancel( |
---|
| 437 | Thread_blocking_operation_States sync_state, |
---|
| 438 | Thread_Control *the_thread, |
---|
| 439 | ISR_Level level |
---|
| 440 | ); |
---|
| 441 | |
---|
[0ea6d07] | 442 | /** |
---|
| 443 | * @brief Finalize a blocking operation. |
---|
| 444 | * |
---|
| 445 | * This method is used to finalize a blocking operation that was |
---|
| 446 | * satisfied. It may be used with thread queues or any other synchronization |
---|
| 447 | * object that uses the blocking states and watchdog times for timeout. |
---|
| 448 | * |
---|
| 449 | * This method will restore the previous ISR disable level during the cancel |
---|
| 450 | * operation. Thus it is an implicit _ISR_Enable(). |
---|
| 451 | * |
---|
| 452 | * @param[in] the_thread is the thread whose blocking is canceled |
---|
| 453 | * @param[in] level is the previous ISR disable level |
---|
| 454 | */ |
---|
| 455 | void _Thread_blocking_operation_Finalize( |
---|
| 456 | Thread_Control *the_thread, |
---|
| 457 | ISR_Level level |
---|
| 458 | ); |
---|
| 459 | |
---|
/**
 * @brief Returns the processor of the thread.
 *
 * On SMP configurations the processor recorded in the scheduler state of
 * the thread is returned.  On uni-processor configurations there is exactly
 * one processor, so the current per-CPU control is returned.
 *
 * @param[in] thread is the thread of interest.
 *
 * @return The processor of the thread.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  /* Uni-processor: the parameter is unused */
  (void) thread;

  return _Per_CPU_Get();
#endif
}
---|
| 472 | |
---|
/**
 * @brief Sets the processor of the thread.
 *
 * On SMP configurations the processor is recorded in the scheduler state of
 * the thread.  On uni-processor configurations this is a no-op.
 *
 * @param[in,out] thread is the thread to update.
 * @param[in] cpu is the processor to assign to the thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  /* Uni-processor: nothing to record */
  (void) thread;
  (void) cpu;
#endif
}
---|
| 485 | |
---|
[5618c37a] | 486 | /** |
---|
| 487 | * This function returns true if the_thread is the currently executing |
---|
| 488 | * thread, and false otherwise. |
---|
| 489 | */ |
---|
| 490 | |
---|
| 491 | RTEMS_INLINE_ROUTINE bool _Thread_Is_executing ( |
---|
| 492 | const Thread_Control *the_thread |
---|
| 493 | ) |
---|
| 494 | { |
---|
| 495 | return ( the_thread == _Thread_Executing ); |
---|
| 496 | } |
---|
| 497 | |
---|
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
---|
| 513 | |
---|
/**
 * @brief Returns @a true and sets time_of_context_switch to the
 * time of the last context switch when the thread is currently executing
 * in the system, otherwise returns @a false.
 *
 * @param[in] the_thread is the thread of interest.
 * @param[out] time_of_context_switch receives the time of the last context
 *   switch in case the thread is currently executing.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
  bool retval = false;

  /* Disable thread dispatching so that the set of executing threads cannot
   * change while the context switch time is sampled. */
  _Thread_Disable_dispatch();
#ifndef RTEMS_SMP
  if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
    *time_of_context_switch = _Thread_Time_of_last_context_switch;
    retval = true;
  }
#else
  /* On SMP the thread may execute on any processor; use the per-CPU value
   * of the processor it executes on. */
  if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
    *time_of_context_switch =
      _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
    retval = true;
  }
#endif
  _Thread_Enable_dispatch();
  return retval;
}
---|
| 542 | |
---|
| 543 | |
---|
[5618c37a] | 544 | /** |
---|
| 545 | * This function returns true if the_thread is the heir |
---|
| 546 | * thread, and false otherwise. |
---|
| 547 | */ |
---|
| 548 | |
---|
| 549 | RTEMS_INLINE_ROUTINE bool _Thread_Is_heir ( |
---|
| 550 | const Thread_Control *the_thread |
---|
| 551 | ) |
---|
| 552 | { |
---|
| 553 | return ( the_thread == _Thread_Heir ); |
---|
| 554 | } |
---|
| 555 | |
---|
| 556 | /** |
---|
| 557 | * This routine clears any blocking state for the_thread. It performs |
---|
| 558 | * any necessary scheduling operations including the selection of |
---|
| 559 | * a new heir thread. |
---|
| 560 | */ |
---|
| 561 | |
---|
| 562 | RTEMS_INLINE_ROUTINE void _Thread_Unblock ( |
---|
| 563 | Thread_Control *the_thread |
---|
| 564 | ) |
---|
| 565 | { |
---|
| 566 | _Thread_Clear_state( the_thread, STATES_BLOCKED ); |
---|
| 567 | } |
---|
| 568 | |
---|
/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 *
 * This routine does not return: it ends with a restart of the CPU context.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  /* Release the Giant lock held by the executing thread before its context
   * is discarded. */
  _Giant_Release( _Per_CPU_Get() );

  /* Disable interrupts; the obtained level is deliberately discarded since
   * _CPU_Context_Restart_self() below does not return to this code. */
  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  /* Reload the floating point context in case this thread uses one */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  /* Restart execution at the initial context of the thread */
  _CPU_Context_Restart_self( &executing->Registers );
}
---|
| 592 | |
---|
| 593 | /** |
---|
| 594 | * This function returns true if the floating point context of |
---|
| 595 | * the_thread is currently loaded in the floating point unit, and |
---|
| 596 | * false otherwise. |
---|
| 597 | */ |
---|
| 598 | |
---|
| 599 | #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) |
---|
| 600 | RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp ( |
---|
| 601 | const Thread_Control *the_thread |
---|
| 602 | ) |
---|
| 603 | { |
---|
| 604 | return ( the_thread == _Thread_Allocated_fp ); |
---|
| 605 | } |
---|
| 606 | #endif |
---|
| 607 | |
---|
[8d6e6eeb] | 608 | /* |
---|
| 609 | * If the CPU has hardware floating point, then we must address saving |
---|
| 610 | * and restoring it as part of the context switch. |
---|
| 611 | * |
---|
| 612 | * The second conditional compilation section selects the algorithm used |
---|
| 613 | * to context switch between floating point tasks. The deferred algorithm |
---|
| 614 | * can be significantly better in a system with few floating point tasks |
---|
| 615 | * because it reduces the total number of save and restore FP context |
---|
| 616 | * operations. However, this algorithm can not be used on all CPUs due |
---|
| 617 | * to unpredictable use of FP registers by some compilers for integer |
---|
| 618 | * operations. |
---|
| 619 | */ |
---|
| 620 | |
---|
/**
 * @brief Saves the floating point context of the executing thread.
 *
 * With the non-deferred floating point switch algorithm the context of the
 * outgoing thread is saved at every context switch.  With the deferred
 * algorithm the save happens lazily in _Thread_Restore_fp().
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  /* Only threads with a floating point context need a save */
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
---|
| 630 | |
---|
/**
 * @brief Restores the floating point context of the executing thread.
 *
 * With the deferred algorithm the restore (and the save of the previous
 * owner's context) happens only when a thread with a floating point context
 * starts to execute and does not already own the loaded context.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    /* Lazily save the context of the previous floating point owner */
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    /* The executing thread now owns the loaded floating point context */
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
---|
| 648 | |
---|
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
---|
| 660 | |
---|
| 661 | /** |
---|
| 662 | * This function returns true if dispatching is disabled, and false |
---|
| 663 | * otherwise. |
---|
| 664 | */ |
---|
| 665 | |
---|
| 666 | RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void ) |
---|
| 667 | { |
---|
| 668 | return ( _Thread_Dispatch_necessary ); |
---|
| 669 | } |
---|
| 670 | |
---|
| 671 | /** |
---|
| 672 | * This function returns true if the_thread is NULL and false otherwise. |
---|
| 673 | */ |
---|
| 674 | |
---|
| 675 | RTEMS_INLINE_ROUTINE bool _Thread_Is_null ( |
---|
| 676 | const Thread_Control *the_thread |
---|
| 677 | ) |
---|
| 678 | { |
---|
| 679 | return ( the_thread == NULL ); |
---|
| 680 | } |
---|
| 681 | |
---|
| 682 | /** |
---|
| 683 | * @brief Is proxy blocking. |
---|
| 684 | * |
---|
| 685 | * status which indicates that a proxy is blocking, and false otherwise. |
---|
| 686 | */ |
---|
| 687 | RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking ( |
---|
| 688 | uint32_t code |
---|
| 689 | ) |
---|
| 690 | { |
---|
| 691 | return (code == THREAD_STATUS_PROXY_BLOCKING); |
---|
| 692 | } |
---|
| 693 | |
---|
[16a41cc] | 694 | RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void) |
---|
| 695 | { |
---|
| 696 | /* Idle threads */ |
---|
[01794eb] | 697 | uint32_t maximum_internal_threads = |
---|
| 698 | rtems_configuration_get_maximum_processors(); |
---|
[16a41cc] | 699 | |
---|
| 700 | /* MPCI thread */ |
---|
| 701 | #if defined(RTEMS_MULTIPROCESSING) |
---|
| 702 | if ( _System_state_Is_multiprocessing ) { |
---|
| 703 | ++maximum_internal_threads; |
---|
| 704 | } |
---|
| 705 | #endif |
---|
| 706 | |
---|
| 707 | return maximum_internal_threads; |
---|
| 708 | } |
---|
| 709 | |
---|
/**
 * @brief Allocates an internal thread object.
 *
 * The caller must provide the necessary protection; this uses the
 * unprotected object allocation.
 *
 * @return A newly allocated internal thread control block.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information );
}
---|
| 715 | |
---|
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary indicator
   * and then read the heir so that we don't miss an update by
   * _Thread_Dispatch_update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}
---|
| 748 | |
---|
#if defined( RTEMS_SMP )
/**
 * @brief Updates the heir thread of a processor and triggers a thread
 * dispatch on it if necessary.
 *
 * @param[in] cpu_self is the processor of the caller.
 * @param[in,out] cpu_for_heir is the processor to update the heir for.
 * @param[in] heir is the new heir thread for @a cpu_for_heir.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    /* A remote processor must be notified via an inter-processor interrupt */
    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
#endif
---|
| 778 | |
---|
[1e51fa5] | 779 | RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used( |
---|
| 780 | Thread_Control *executing, |
---|
| 781 | Timestamp_Control *time_of_last_context_switch |
---|
| 782 | ) |
---|
| 783 | { |
---|
| 784 | Timestamp_Control uptime; |
---|
| 785 | Timestamp_Control ran; |
---|
| 786 | |
---|
| 787 | _TOD_Get_uptime( &uptime ); |
---|
| 788 | _Timestamp_Subtract( |
---|
| 789 | time_of_last_context_switch, |
---|
| 790 | &uptime, |
---|
| 791 | &ran |
---|
| 792 | ); |
---|
| 793 | *time_of_last_context_switch = uptime; |
---|
| 794 | _Timestamp_Add_to( &executing->cpu_time_used, &ran ); |
---|
| 795 | } |
---|
| 796 | |
---|
[0dd732d] | 797 | RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize( |
---|
| 798 | Thread_Action_control *action_control |
---|
| 799 | ) |
---|
| 800 | { |
---|
| 801 | _Chain_Initialize_empty( &action_control->Chain ); |
---|
| 802 | } |
---|
| 803 | |
---|
| 804 | RTEMS_INLINE_ROUTINE void _Thread_Action_initialize( |
---|
| 805 | Thread_Action *action, |
---|
| 806 | Thread_Action_handler handler |
---|
| 807 | ) |
---|
| 808 | { |
---|
| 809 | action->handler = handler; |
---|
| 810 | _Chain_Set_off_chain( &action->Node ); |
---|
| 811 | } |
---|
| 812 | |
---|
| 813 | RTEMS_INLINE_ROUTINE Per_CPU_Control * |
---|
| 814 | _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level ) |
---|
| 815 | { |
---|
| 816 | Per_CPU_Control *cpu; |
---|
| 817 | |
---|
| 818 | _ISR_Disable_without_giant( *level ); |
---|
| 819 | cpu = _Per_CPU_Get(); |
---|
| 820 | _Per_CPU_Acquire( cpu ); |
---|
| 821 | |
---|
| 822 | return cpu; |
---|
| 823 | } |
---|
| 824 | |
---|
| 825 | RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire( |
---|
| 826 | Thread_Control *thread, |
---|
| 827 | ISR_Level *level |
---|
| 828 | ) |
---|
| 829 | { |
---|
| 830 | Per_CPU_Control *cpu; |
---|
| 831 | |
---|
| 832 | _ISR_Disable_without_giant( *level ); |
---|
| 833 | cpu = _Thread_Get_CPU( thread ); |
---|
| 834 | _Per_CPU_Acquire( cpu ); |
---|
| 835 | |
---|
| 836 | return cpu; |
---|
| 837 | } |
---|
| 838 | |
---|
/**
 * @brief Releases the per-CPU lock and restores the interrupt level.
 *
 * This is the counterpart of _Thread_Action_ISR_disable_and_acquire() and
 * _Thread_Action_ISR_disable_and_acquire_for_executing().
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}
---|
| 846 | |
---|
/**
 * @brief Adds the post-switch action to the thread if it is not already
 * pending.
 *
 * A thread dispatch is made necessary on the processor of the thread.  On SMP
 * configurations an inter-processor interrupt is sent to this processor in
 * case it differs from the current processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action *action
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  cpu_of_thread->dispatch_necessary = true;

#if defined(RTEMS_SMP)
  if ( _Per_CPU_Get() != cpu_of_thread ) {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#endif

  /* An action which is already on the chain keeps its position */
  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}
---|
| 871 | |
---|
[1b1be254] | 872 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting( |
---|
| 873 | Thread_Life_state life_state |
---|
| 874 | ) |
---|
| 875 | { |
---|
| 876 | return ( life_state & THREAD_LIFE_RESTARTING ) != 0; |
---|
| 877 | } |
---|
| 878 | |
---|
| 879 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating( |
---|
| 880 | Thread_Life_state life_state |
---|
| 881 | ) |
---|
| 882 | { |
---|
| 883 | return ( life_state & THREAD_LIFE_TERMINATING ) != 0; |
---|
| 884 | } |
---|
| 885 | |
---|
| 886 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected( |
---|
| 887 | Thread_Life_state life_state |
---|
| 888 | ) |
---|
| 889 | { |
---|
| 890 | return ( life_state & THREAD_LIFE_PROTECTED ) != 0; |
---|
| 891 | } |
---|
| 892 | |
---|
| 893 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing( |
---|
| 894 | Thread_Life_state life_state |
---|
| 895 | ) |
---|
| 896 | { |
---|
[69c3f076] | 897 | return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0; |
---|
[1b1be254] | 898 | } |
---|
| 899 | |
---|
[6c7caa1a] | 900 | /** |
---|
| 901 | * @brief Returns true if the thread owns resources, and false otherwise. |
---|
| 902 | * |
---|
| 903 | * Resources are accounted with the Thread_Control::resource_count resource |
---|
| 904 | * counter. This counter is used by semaphore objects for example. |
---|
| 905 | * |
---|
[3045738] | 906 | * In addition to the resource counter there is a resource dependency tree |
---|
| 907 | * available on SMP configurations. In case this tree is non-empty, then the |
---|
| 908 | * thread owns resources. |
---|
| 909 | * |
---|
[6c7caa1a] | 910 | * @param[in] the_thread The thread. |
---|
| 911 | */ |
---|
| 912 | RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources( |
---|
| 913 | const Thread_Control *the_thread |
---|
| 914 | ) |
---|
| 915 | { |
---|
| 916 | bool owns_resources = the_thread->resource_count != 0; |
---|
| 917 | |
---|
[3045738] | 918 | #if defined(RTEMS_SMP) |
---|
| 919 | owns_resources = owns_resources |
---|
| 920 | || _Resource_Node_owns_resources( &the_thread->Resource_node ); |
---|
| 921 | #endif |
---|
| 922 | |
---|
[6c7caa1a] | 923 | return owns_resources; |
---|
| 924 | } |
---|
| 925 | |
---|
[4c8a0ac] | 926 | /** |
---|
| 927 | * @brief The initial thread wait flags value set by _Thread_Initialize(). |
---|
| 928 | */ |
---|
| 929 | #define THREAD_WAIT_FLAGS_INITIAL 0x0U |
---|
| 930 | |
---|
| 931 | /** |
---|
| 932 | * @brief Mask to get the thread wait state flags. |
---|
| 933 | */ |
---|
| 934 | #define THREAD_WAIT_STATE_MASK 0xffU |
---|
| 935 | |
---|
| 936 | /** |
---|
| 937 | * @brief Indicates that the thread begins with the blocking operation. |
---|
| 938 | * |
---|
| 939 | * A blocking operation consists of an optional watchdog initialization and the |
---|
| 940 | * setting of the appropriate thread blocking state with the corresponding |
---|
| 941 | * scheduler block operation. |
---|
| 942 | */ |
---|
| 943 | #define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U |
---|
| 944 | |
---|
| 945 | /** |
---|
| 946 | * @brief Indicates that the thread completed the blocking operation. |
---|
| 947 | */ |
---|
| 948 | #define THREAD_WAIT_STATE_BLOCKED 0x2U |
---|
| 949 | |
---|
| 950 | /** |
---|
| 951 | * @brief Indicates that the thread progress condition is satisfied and it is |
---|
| 952 | * ready to resume execution. |
---|
| 953 | */ |
---|
| 954 | #define THREAD_WAIT_STATE_SATISFIED 0x4U |
---|
| 955 | |
---|
| 956 | /** |
---|
| 957 | * @brief Indicates that a timeout occurred and the thread is ready to resume |
---|
| 958 | * execution. |
---|
| 959 | */ |
---|
| 960 | #define THREAD_WAIT_STATE_TIMEOUT 0x8U |
---|
| 961 | |
---|
| 962 | /** |
---|
| 963 | * @brief Indicates that the thread progress condition was satisfied during the |
---|
| 964 | * blocking operation and it is ready to resume execution. |
---|
| 965 | */ |
---|
| 966 | #define THREAD_WAIT_STATE_INTERRUPT_SATISFIED 0x10U |
---|
| 967 | |
---|
| 968 | /** |
---|
| 969 | * @brief Indicates that a timeout occurred during the blocking operation and |
---|
| 970 | * the thread is ready to resume execution. |
---|
| 971 | */ |
---|
| 972 | #define THREAD_WAIT_STATE_INTERRUPT_TIMEOUT 0x20U |
---|
| 973 | |
---|
| 974 | /** |
---|
| 975 | * @brief Mask to get the thread wait class flags. |
---|
| 976 | */ |
---|
| 977 | #define THREAD_WAIT_CLASS_MASK 0xff00U |
---|
| 978 | |
---|
| 979 | /** |
---|
| 980 | * @brief Indicates that the thread waits for an event. |
---|
| 981 | */ |
---|
| 982 | #define THREAD_WAIT_CLASS_EVENT 0x100U |
---|
| 983 | |
---|
| 984 | /** |
---|
| 985 | * @brief Indicates that the thread waits for a system event. |
---|
| 986 | */ |
---|
| 987 | #define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U |
---|
| 988 | |
---|
| 989 | /** |
---|
 * @brief Indicates that the thread waits for an object.
---|
| 991 | */ |
---|
| 992 | #define THREAD_WAIT_CLASS_OBJECT 0x400U |
---|
| 993 | |
---|
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations a relaxed atomic store is used since the wait flags
 * may be accessed concurrently by other processors.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control *the_thread,
  Thread_Wait_flags flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
---|
| 1005 | |
---|
/**
 * @brief Returns the current thread wait flags.
 *
 * On SMP configurations a relaxed atomic load is used since the wait flags
 * may be accessed concurrently by other processors.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
---|
| 1016 | |
---|
| 1017 | /** |
---|
| 1018 | * @brief Tries to change the thread wait flags inside a critical section |
---|
| 1019 | * (interrupts disabled). |
---|
| 1020 | * |
---|
| 1021 | * In case the wait flags are equal to the expected wait flags, then the wait |
---|
| 1022 | * flags are set to the desired wait flags. |
---|
| 1023 | * |
---|
| 1024 | * @param[in] the_thread The thread. |
---|
| 1025 | * @param[in] expected_flags The expected wait flags. |
---|
| 1026 | * @param[in] desired_flags The desired wait flags. |
---|
| 1027 | * |
---|
| 1028 | * @retval true The wait flags were equal to the expected wait flags. |
---|
| 1029 | * @retval false Otherwise. |
---|
| 1030 | */ |
---|
| 1031 | RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical( |
---|
| 1032 | Thread_Control *the_thread, |
---|
| 1033 | Thread_Wait_flags expected_flags, |
---|
| 1034 | Thread_Wait_flags desired_flags |
---|
| 1035 | ) |
---|
| 1036 | { |
---|
| 1037 | #if defined(RTEMS_SMP) |
---|
| 1038 | return _Atomic_Compare_exchange_uint( |
---|
| 1039 | &the_thread->Wait.flags, |
---|
| 1040 | &expected_flags, |
---|
| 1041 | desired_flags, |
---|
| 1042 | ATOMIC_ORDER_RELAXED, |
---|
| 1043 | ATOMIC_ORDER_RELAXED |
---|
| 1044 | ); |
---|
| 1045 | #else |
---|
| 1046 | bool success = the_thread->Wait.flags == expected_flags; |
---|
| 1047 | |
---|
| 1048 | if ( success ) { |
---|
| 1049 | the_thread->Wait.flags = desired_flags; |
---|
| 1050 | } |
---|
| 1051 | |
---|
| 1052 | return success; |
---|
| 1053 | #endif |
---|
| 1054 | } |
---|
| 1055 | |
---|
/**
 * @brief Tries to change the thread wait flags.
 *
 * On uni-processor configurations interrupts are disabled around the
 * operation; on SMP configurations the atomic compare and exchange of
 * _Thread_Wait_flags_try_change_critical() provides the protection.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control *the_thread,
  Thread_Wait_flags expected_flags,
  Thread_Wait_flags desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}
---|
| 1086 | |
---|
/**
 * @brief Records the processor assigned to the thread for debug purposes.
 *
 * This is a no-op unless RTEMS_SMP and RTEMS_DEBUG are both defined.
 */
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}
---|
| 1099 | |
---|
#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif
---|
| 1121 | |
---|
| 1122 | /** @}*/ |
---|
| 1123 | |
---|
| 1124 | #ifdef __cplusplus |
---|
| 1125 | } |
---|
| 1126 | #endif |
---|
| 1127 | |
---|
| 1128 | #if defined(RTEMS_MULTIPROCESSING) |
---|
| 1129 | #include <rtems/score/threadmp.h> |
---|
| 1130 | #endif |
---|
| 1131 | |
---|
| 1132 | #endif |
---|
| 1133 | /* end of include file */ |
---|