source: rtems/cpukit/score/include/rtems/score/percpu.h @ 3ca84d0c

Last change on this file since 3ca84d0c was 3ca84d0c, checked in by Sebastian Huber <sebastian.huber@…>, on 03/06/14 at 09:53:17

score: Fix per-CPU state documentation

/**
 *  @file  rtems/score/percpu.h
 *
 *  This include file defines the per CPU information required
 *  by RTEMS.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 */

#ifndef _RTEMS_PERCPU_H
#define _RTEMS_PERCPU_H

#include <rtems/score/cpu.h>

#if defined( ASM )
  #include <rtems/asm.h>
#else
  #include <rtems/score/assert.h>
  #include <rtems/score/isrlock.h>
  #include <rtems/score/timestamp.h>
  #include <rtems/score/smp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#if defined( RTEMS_SMP )
  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #define PER_CPU_CONTROL_SIZE_LOG2 7

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
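
/*
 * Illustrative note (not part of this header): with PER_CPU_CONTROL_SIZE_LOG2
 * the per-CPU control of a processor can be located with a shift and an add,
 * which is what assembler code typically does.  Expressed in C, assuming the
 * _Per_CPU_Information array declared below and a hypothetical cpu_index:
 *
 * @code
 * uintptr_t base = (uintptr_t) _Per_CPU_Information;
 * uintptr_t addr = base + ( (uintptr_t) cpu_index << PER_CPU_CONTROL_SIZE_LOG2 );
 * Per_CPU_Control *control = &( (Per_CPU_Control_envelope *) addr )->per_cpu;
 * @endcode
 */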

#if !defined( ASM )

#ifndef __THREAD_CONTROL_DEFINED__
#define __THREAD_CONTROL_DEFINED__
typedef struct Thread_Control_struct Thread_Control;
#endif

/**
 *  @defgroup PerCPU RTEMS Per CPU Information
 *
 *  @ingroup Score
 *
 *  This defines the per CPU state information required by RTEMS
 *  and the BSP.  In an SMP configuration, there will be multiple
 *  instances of this data structure -- one per CPU -- and the
 *  current CPU number will be used as the index.
 */

/**@{*/

#if defined( RTEMS_SMP )

#if CPU_USE_DEFERRED_FP_SWITCH == TRUE
  #error "deferred FP switch not implemented for SMP"
#endif

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return if a shutdown was requested.  The _SMP_Send_message()
 * function will be used to notify other processors about state changes, if
 * the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization, one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready
   * to receive inter-processor interrupts.  Interrupt delivery must be
   * disabled in this state, but requested inter-processor interrupts must be
   * recorded and must be delivered once the secondary processor enables
   * interrupts for the first time.  The boot processor will wait for all
   * secondary processors to change into this state.  In case a secondary
   * processor does not reach this state, the system will not start.  The
   * secondary processors now wait for a change into the
   * PER_CPU_STATE_REQUEST_START_MULTITASKING state, which the boot processor
   * sets once all secondary processors have reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of the processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */

/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct {
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning of an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /** This is set to true when this CPU needs to run the dispatcher. */
  volatile bool dispatch_necessary;

  /** This is the thread executing on this CPU. */
  Thread_Control *executing;

  /** This is the heir thread for this CPU. */
  Thread_Control *heir;

  /** This is the time of the last context switch on this CPU. */
  Timestamp_Control time_of_last_context_switch;

  /**
   * @brief This lock protects the dispatch_necessary, executing, heir and
   * message fields.
   */
  ISR_lock_Control lock;

  #if defined( RTEMS_SMP )
    /**
     *  This is the pending inter-processor interrupt request (message) for
     *  this processor.
     *
     *  @note This may become a chain protected by atomic instructions.
     */
    uint32_t message;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;
  #endif
} Per_CPU_Control;

#if defined( RTEMS_SMP )
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
  _ISR_lock_ISR_disable_and_acquire( &( per_cpu )->lock, isr_cookie )

#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
  _ISR_lock_Release_and_ISR_enable( &( per_cpu )->lock, isr_cookie )

#define _Per_CPU_Acquire( per_cpu ) \
  _ISR_lock_Acquire( &( per_cpu )->lock )

#define _Per_CPU_Release( per_cpu ) \
  _ISR_lock_Release( &( per_cpu )->lock )
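
/*
 * A minimal usage sketch (illustrative only, not taken from the RTEMS
 * sources): the per-CPU lock protects the dispatch_necessary, executing, heir
 * and message fields, so an update of the heir thread could look roughly like
 * this.  The isr_cookie is assumed to be an ISR_Level as used by
 * _ISR_Disable(); cpu_index and new_heir are hypothetical variables.
 *
 * @code
 * ISR_Level level;
 * Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu_index );
 *
 * _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
 * per_cpu->heir = new_heir;
 * per_cpu->dispatch_necessary = true;
 * _Per_CPU_Release_and_ISR_enable( per_cpu, level );
 * @endcode
 */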

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Enable( isr_cookie )
#endif
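
/*
 * A minimal usage sketch (illustrative only): _Per_CPU_Acquire_all() and
 * _Per_CPU_Release_all() bracket a section that must be consistent with
 * respect to every per-CPU control, for example taking a snapshot of all of
 * them.  The cookie is assumed to be an ISR_Level as used by _ISR_Disable().
 *
 * @code
 * ISR_Level level;
 * uint32_t cpu;
 *
 * _Per_CPU_Acquire_all( level );
 * for ( cpu = 0 ; cpu < _SMP_Get_processor_count() ; ++cpu ) {
 *   // All per-CPU controls are locked here, so _Per_CPU_Get_by_index( cpu )
 *   // can be inspected without races.
 * }
 * _Per_CPU_Release_all( level );
 * @endcode
 */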

#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *per_cpu =
    &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu;

  _Assert(
    per_cpu->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return per_cpu;
}
#else
#define _Per_CPU_Get() ( &_Per_CPU_Information[ 0 ].per_cpu )
#endif

static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return &_Per_CPU_Information[ index ].per_cpu;
}

static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *per_cpu )
{
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) per_cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
}
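
/*
 * Illustrative note: since _Per_CPU_Information[] is an array of envelopes,
 * _Per_CPU_Get_by_index() and _Per_CPU_Get_index() are inverse operations.
 *
 * @code
 * uint32_t cpu_index = 0;
 * Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu_index );
 *
 * _Assert( _Per_CPU_Get_index( per_cpu ) == cpu_index );
 * @endcode
 */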

#if defined( RTEMS_SMP )

static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *per_cpu )
{
  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( per_cpu ) );
}

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

void _Per_CPU_State_change(
  Per_CPU_Control *per_cpu,
  Per_CPU_State new_state
);
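
/*
 * A minimal start-up sketch (illustrative only, the function name is
 * hypothetical): following the Per_CPU_State documentation above, a secondary
 * processor reports that it is ready and later, once the boot processor has
 * requested the start of multitasking, switches to its heir thread.
 *
 * @code
 * void secondary_processor_start( void )
 * {
 *   Per_CPU_Control *per_cpu =
 *     _Per_CPU_Get_by_index( _SMP_Get_current_processor() );
 *
 *   // Basic initialization is complete, interrupt delivery is still disabled.
 *   _Per_CPU_State_change(
 *     per_cpu,
 *     PER_CPU_STATE_READY_TO_START_MULTITASKING
 *   );
 *
 *   // Once the state is PER_CPU_STATE_REQUEST_START_MULTITASKING, perform a
 *   // context switch to the heir thread, which normally enables interrupts
 *   // for the first time.
 * }
 * @endcode
 */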

#endif /* defined( RTEMS_SMP ) */

/*
 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \
  _Per_CPU_Get()->executing
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
  _Per_CPU_Get()->time_of_last_context_switch
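
/*
 * Illustrative note: the macros above make the control of the current
 * processor look like a set of global variables, for example:
 *
 * @code
 * if ( _Thread_Dispatch_disable_level > 0 ) {
 *   // A thread dispatch critical section is active on the current processor.
 * }
 * @endcode
 */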

/**@}*/

#endif /* !defined( ASM ) */

#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need places in the per CPU table to hold the stack pointers.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */