source: rtems/cpukit/score/inline/rtems/score/thread.inl @ 301fef1

Last change on this file since 301fef1 was ef49476, checked in by Ralf Corsepius <ralf.corsepius@…>, on 08/19/08 at 08:32:59

Add header guard to force indirect inclusion.

  • Property mode set to 100644
File size: 7.6 KB
/**
 *  @file  rtems/score/thread.inl
 *
 *  This file contains the macro implementation of the inlined
 *  routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#ifndef _RTEMS_SCORE_THREAD_H
# error "Never use <rtems/score/thread.inl> directly; include <rtems/score/thread.h> instead."
#endif

#ifndef _RTEMS_SCORE_THREAD_INL
#define _RTEMS_SCORE_THREAD_INL

#include <rtems/score/sysstate.h>

/**
 *  @addtogroup ScoreThread
 *  @{
 */

/**
 *  This routine halts multitasking and returns control to
 *  the "thread" (i.e. the BSP) which initially invoked the
 *  routine which initialized the system.
 */

RTEMS_INLINE_ROUTINE void _Thread_Stop_multitasking( void )
{
  Context_Control context_area;
  Context_Control *context_p = &context_area;

  if ( _System_state_Is_up(_System_state_Get ()) )
    context_p = &_Thread_Executing->Registers;

  _Context_Switch( context_p, &_Thread_BSP_context );
}
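
/*
 *  Editorial note (not part of the original file): this routine is the
 *  final step of shutting the system down.  A hypothetical shutdown or
 *  fatal error path might end with:
 *
 *    _Thread_Disable_dispatch();
 *    _Thread_Stop_multitasking();   -- resumes _Thread_BSP_context
 */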

/**
 *  This function returns TRUE if the_thread is the currently executing
 *  thread, and FALSE otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

/**
 *  This function returns TRUE if the_thread is the heir
 *  thread, and FALSE otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 *  This function returns TRUE if the currently executing thread
 *  is also the heir thread, and FALSE otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_executing_also_the_heir( void )
{
  return ( _Thread_Executing == _Thread_Heir );
}
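
/*
 *  Editorial note (not part of the original file): the predicates above
 *  feed the dispatcher's decision of whether a context switch is needed.
 *  A hypothetical caller might check:
 *
 *    if ( !_Thread_Is_executing_also_the_heir() &&
 *         _Thread_Is_dispatching_enabled() )
 *      _Thread_Dispatch();   -- switch from the executing thread to the heir
 */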

/**
 *  This routine clears any blocking state for the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 *  This routine resets the current context of the calling thread
 *  to that of its initial state.
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( void )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( _Thread_Executing->fp_context != NULL )
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif

  _CPU_Context_Restart_self( &_Thread_Executing->Registers );
}

/**
 *  This routine updates the heir thread to be the highest priority
 *  ready thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Calculate_heir( void )
{
  _Thread_Heir = (Thread_Control *)
    _Thread_Ready_chain[ _Priority_Get_highest() ].first;
}

/**
 *  This function returns TRUE if the floating point context of
 *  the_thread is currently loaded in the floating point unit, and
 *  FALSE otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE boolean _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/**
 *  This routine is invoked when the currently loaded floating
 *  point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
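
/*
 *  Editorial note (not part of the original file): the two FP helpers
 *  above support a deferred floating point context switch.  A sketch of
 *  the pattern, simplified and assuming "executing" is the new thread:
 *
 *    if ( executing->fp_context != NULL &&
 *         !_Thread_Is_allocated_fp( executing ) ) {
 *      if ( _Thread_Allocated_fp != NULL )
 *        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
 *      _Context_Restore_fp( &executing->fp_context );
 *      _Thread_Allocated_fp = executing;   -- executing now owns the FPU
 *    }
 */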

/**
 *  This routine prevents dispatching.
 */

#if defined(RTEMS_HEAVY_STACK_DEBUG) || defined(RTEMS_HEAVY_MALLOC_DEBUG)
  #include <rtems/bspIo.h>
  #include <rtems/fatal.h>
  #include <rtems/stackchk.h>
  #include <rtems/score/sysstate.h>
  #include <rtems/score/heap.h>

  /*
   * This is currently not defined in any .h file, so we have to
   * extern it here.
   */
  extern Heap_Control  RTEMS_Malloc_Heap;
#endif

RTEMS_INLINE_ROUTINE void _Thread_Disable_dispatch( void )
{
  /*
   *  This check is very brutal to system performance but is very helpful
   *  at finding blown stack problems.  If you have a stack problem and
   *  need help finding it, then define RTEMS_HEAVY_STACK_DEBUG to enable
   *  this check.  Every system call will check the stack and, since
   *  mutexes are used frequently in most systems, you might get lucky.
   */
  #if defined(RTEMS_HEAVY_STACK_DEBUG)
    if (_System_state_Is_up(_System_state_Get()) && (_ISR_Nest_level == 0)) {
      if ( rtems_stack_checker_is_blown() ) {
        printk( "Stack blown!!\n" );
        rtems_fatal_error_occurred( 99 );
      }
    }
  #endif

  _Thread_Dispatch_disable_level += 1;
  RTEMS_COMPILER_MEMORY_BARRIER();

  /*
   * This check is even more brutal than the other one.  This enables
   * malloc heap integrity checking upon entry to every system call.
   */
  #if defined(RTEMS_HEAVY_MALLOC_DEBUG)
    if ( _Thread_Dispatch_disable_level == 1 ) {
      _Heap_Walk( &RTEMS_Malloc_Heap, 99, FALSE );
    }
  #endif
}

/**
 *  This routine allows dispatching to occur again.  If this is
 *  the outermost dispatching critical section, then a dispatching
 *  operation will be performed and, if necessary, control of the
 *  processor will be transferred to the heir thread.
 */

#if ( (CPU_INLINE_ENABLE_DISPATCH == FALSE) || \
      (__RTEMS_DO_NOT_INLINE_THREAD_ENABLE_DISPATCH__ == 1) )
void _Thread_Enable_dispatch( void );
#else
/* inlining of enable dispatching must be true */
RTEMS_INLINE_ROUTINE void _Thread_Enable_dispatch( void )
{
  RTEMS_COMPILER_MEMORY_BARRIER();
  if ( (--_Thread_Dispatch_disable_level) == 0 )
    _Thread_Dispatch();
}
#endif
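
/*
 *  Editorial note (not part of the original file): the disable/enable pair
 *  above brackets a dispatching critical section.  A minimal usage sketch:
 *
 *    _Thread_Disable_dispatch();
 *      ... update thread or scheduler data structures ...
 *    _Thread_Enable_dispatch();   -- may dispatch the heir on exit
 *
 *  Calls may nest; only the outermost enable can trigger a dispatch.
 */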


/**
 *  This routine allows dispatching to occur again.  However,
 *  no dispatching operation is performed even if this is the
 *  outermost dispatching critical section.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unnest_dispatch( void )
{
  RTEMS_COMPILER_MEMORY_BARRIER();
  _Thread_Dispatch_disable_level -= 1;
}

/**
 *  This function returns TRUE if dispatching is enabled, and FALSE
 *  otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_dispatching_enabled( void )
{
  return ( _Thread_Dispatch_disable_level == 0 );
}

/**
 *  This function returns TRUE if a context switch is necessary, and FALSE
 *  otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_context_switch_necessary( void )
{
  return ( _Context_Switch_necessary );
}

/**
 *  This routine initializes the thread dispatching subsystem.
 */

RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
{
  _Thread_Dispatch_disable_level = 1;
}

/**
 *  This function returns TRUE if the_thread is NULL and FALSE otherwise.
 */

RTEMS_INLINE_ROUTINE boolean _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/** @brief _Thread_Is_proxy_blocking
 *
 *  This function returns TRUE if the status code is equal to the
 *  status which indicates that a proxy is blocking, and FALSE otherwise.
 */
RTEMS_INLINE_ROUTINE boolean _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

/**
 *  This routine allocates an internal thread.
 */

RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *) _Objects_Allocate( &_Thread_Internal_information );
}

/**
 *  This routine frees an internal thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Internal_free (
  Thread_Control *the_task
)
{
  _Objects_Free( &_Thread_Internal_information, &the_task->Object );
}
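
/*
 *  Editorial note (not part of the original file): internal threads such
 *  as the IDLE thread are managed through the pair above.  A hypothetical
 *  allocate/free pairing:
 *
 *    Thread_Control *idle = _Thread_Internal_allocate();
 *    if ( idle != NULL ) {
 *      ... initialize and start the thread ...
 *      _Thread_Internal_free( idle );   -- only on teardown or failure
 *    }
 */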

/**
 *  This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 *  This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}

/**@}*/

#endif
/* end of include file */