source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 1b1be254

Last change on this file since 1b1be254 was 1b1be254, checked in by Sebastian Huber <sebastian.huber@…>, on 03/25/14 at 09:54:49

score: Thread life cycle re-implementation

The thread deletion is now supported on SMP.

This change fixes the following PRs:

PR1814: SMP race condition between stack free and dispatch

PR2035: psxcancel reveals NULL pointer access in _Thread_queue_Extract()

The POSIX cleanup handlers are now called in the right context (they should be
called in the context of the terminating thread).

http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_09.html

Add a user extension that reflects a thread termination event. This is
used to reclaim the Newlib reentrancy structure (which may use file
operations), the POSIX cleanup handlers and the POSIX key destructors.
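
As a rough illustration of the new termination event, a Classic API application
could observe it with a user extension along the following lines. This is only a
sketch: the thread_terminate table field and the callback shape follow the
4.11-era user extensions API, and the names my_terminate_extension,
my_extensions and register_my_extensions are made up for the example.

#include <rtems.h>

/* Invoked in the context of the terminating thread, so per-thread
   resources such as the Newlib reentrancy structure are still valid
   while they are reclaimed. */
static void my_terminate_extension( rtems_tcb *terminated )
{
  (void) terminated;
  /* release application-specific per-thread resources here */
}

static const rtems_extensions_table my_extensions = {
  .thread_terminate = my_terminate_extension
};

static void register_my_extensions( void )
{
  rtems_id id;

  /* create an extension set named "TERM"; error handling omitted */
  (void) rtems_extension_create(
    rtems_build_name( 'T', 'E', 'R', 'M' ),
    &my_extensions,
    &id
  );
}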

/**
 * @file
 *
 * @brief Inlined Routines from the Thread Handler
 *
 * This file contains the macro implementation of the inlined
 * routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_THREADIMPL_H
#define _RTEMS_SCORE_THREADIMPL_H

#include <rtems/score/thread.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/todimpl.h>
#include <rtems/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  The following constant defines the status code used to indicate
 *  that a proxy is blocking while a thread waits for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Objects_Information _Thread_Internal_information;

/**
 *  The following holds how many user extensions are in the system.  This
 *  is used to determine how many user extension data areas to allocate
 *  per thread.
 */
SCORE_EXTERN uint32_t   _Thread_Maximum_extensions;

/**
 *  The following is used to manage the length of a timeslice quantum.
 */
SCORE_EXTERN uint32_t   _Thread_Ticks_per_timeslice;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer. Some C library implementations
 * such as newlib have a single global pointer that is changed during a
 * context switch. This variable points to that global pointer. The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif

/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype
 *  @param entry_point
 *  @param pointer_argument
 *  @param numeric_argument
 *  @param[in,out] processor The processor if used to start an idle thread
 *  during system initialization.  Must be set to @c NULL to start a normal
 *  thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *processor
);

bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

bool _Thread_Set_life_protection( bool protect );

void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread has reached the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 *  @brief Removes any set states for @a the_thread.
 *
 *  This routine removes any set states for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + ready chain
 *    + select heir
 */
void _Thread_Ready(
  Thread_Control *the_thread
);

/**
 *  @brief Clears the indicated STATES for @a the_thread.
 *
 *  This routine clears the indicated STATES for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + priority map
 *    + select heir
 */
void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Sets the indicated @a state for @a the_thread.
 *
 *  This routine sets the indicated @a state for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  @param[in] the_thread is the thread to set the state for.
 *  @param[in] state is the state to set the_thread to.
 *
 *  - INTERRUPT LATENCY:
 *   + ready chain
 *   + select map
 */
void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Sets the transient state for a thread.
 *
 *  This routine sets the Transient state for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  @param[in] the_thread is the thread to perform the action upon.
 *
 *  - INTERRUPT LATENCY:
 *    + single case
 */
void _Thread_Set_transient(
  Thread_Control *the_thread
);

/**
 *  @brief Initializes environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 *  @brief Change the priority of a thread.
 *
 *  This routine changes the current priority of @a the_thread to
 *  @a new_priority.  It performs any necessary scheduling operations
 *  including the selection of a new heir thread.
 *
 *  @param[in] the_thread is the thread to change
 *  @param[in] new_priority is the priority to set @a the_thread to
 *  @param[in] prepend_it is a switch to prepend the thread
 */
void _Thread_Change_priority (
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
);

/**
 *  @brief Set thread priority.
 *
 *  This routine updates the priority related fields in the_thread
 *  control block to indicate the current priority is now new_priority.
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 *  This routine suspends _the_thread by setting the STATES_SUSPENDED
 *  state in its thread control block.
 */
#define _Thread_Suspend( _the_thread ) \
        _Thread_Set_state( _the_thread, STATES_SUSPENDED )

/**
 *  This routine resumes _the_thread by clearing the STATES_SUSPENDED
 *  state in its thread control block.
 */
#define _Thread_Resume( _the_thread ) \
        _Thread_Clear_state( _the_thread, STATES_SUSPENDED )

/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the thread object.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);
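
/*
 * Illustrative sketch, not part of the original header: the usual caller
 * pattern for _Thread_Get().  Dropping the reference with _Objects_Put()
 * afterwards is an assumption based on the object handler API of this era.
 *
 *   Thread_Control    *the_thread;
 *   Objects_Locations  location;
 *
 *   the_thread = _Thread_Get( id, &location );
 *   switch ( location ) {
 *     case OBJECTS_LOCAL:
 *       // operate on the_thread with thread dispatching disabled
 *       _Objects_Put( &the_thread->Object );
 *       break;
 *     default:
 *       // invalid identifier or thread resides on a remote node
 *       break;
 *   }
 */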

/**
 *  @brief Cancel a blocking operation due to ISR.
 *
 *  This method is used to cancel a blocking operation that was
 *  satisfied from an ISR while the thread executing was in the
 *  process of blocking.
 *
 *  This method will restore the previous ISR disable level during the cancel
 *  operation.  Thus it is an implicit _ISR_Enable().
 *
 *  @param[in] sync_state is the synchronization state
 *  @param[in] the_thread is the thread whose blocking is canceled
 *  @param[in] level is the previous ISR disable level
 *
 *  @note This is a rare routine in RTEMS.  It is called with
 *        interrupts disabled and only when an ISR completed
 *        a blocking condition in process.
 */
void _Thread_blocking_operation_Cancel(
  Thread_blocking_operation_States  sync_state,
  Thread_Control                   *the_thread,
  ISR_Level                         level
);

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}

/**
 * This function returns true if the_thread is the currently executing
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

/**
 * This function returns true if the_thread is the heir
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Giant_Release();

  _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}

/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}

/**
 * This function returns true if the_thread is NULL and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}

/**
 * This routine allocates an internal thread.
 */

RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *) _Objects_Allocate( &_Thread_Internal_information );
}

RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
  Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  if ( thread->is_executing ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

    cpu_of_thread->dispatch_necessary = true;

    if ( cpu_of_executing != cpu_of_thread ) {
      _Per_CPU_Send_interrupt( cpu_of_thread );
    }
  }
#else
  (void) thread;
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
{
  if ( _ISR_Is_in_progress() && _Thread_Is_executing( thread ) ) {
    _Thread_Dispatch_necessary = true;
  } else {
#if defined(RTEMS_SMP)
    if ( thread->is_executing ) {
      const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
      Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

      if ( cpu_of_executing != cpu_of_thread ) {
        cpu_of_thread->dispatch_necessary = true;
        _Per_CPU_Send_interrupt( cpu_of_thread );
      }
    }
#endif
  }
}

RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  action->handler = handler;
  _Chain_Set_off_chain( &action->Node );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}

RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu;
  ISR_Level        level;

  cpu = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );
  _Thread_Action_release_and_ISR_enable( cpu, level );
}
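
/*
 * Illustrative sketch, not part of the original header: queueing a
 * post-switch action.  The handler signature mirrors the declaration of
 * _Thread_Life_action_handler() above; my_action and my_action_handler
 * are hypothetical names.
 *
 *   static Thread_Action my_action;
 *
 *   static void my_action_handler(
 *     Thread_Control  *executing,
 *     Thread_Action   *action,
 *     Per_CPU_Control *cpu,
 *     ISR_Level        level
 *   );
 *
 *   _Thread_Action_initialize( &my_action, my_action_handler );
 *   _Thread_Add_post_switch_action( thread, &my_action );
 *
 * The handler is expected to run the next time the thread is dispatched.
 */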

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINTING ) != 0;
}

#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif

/** @}*/

#ifdef __cplusplus
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/threadmp.h>
#endif

#endif
/* end of include file */