source: rtems/cpukit/score/include/rtems/score/percpu.h @ e5120a5

Last change on this file since e5120a5 was e5120a5, checked in by Sebastian Huber <sebastian.huber@…>, on 04/22/14 at 08:10:39

score: Add _CPU_Get_current_per_CPU_control()

Add optional method _CPU_Get_current_per_CPU_control() to obtain the
per-CPU control of the current processor.

This is optional; not every CPU port needs it, since it is only an
optimization variant. In case this macro is undefined, the default
implementation using the current processor index will be used.

/**
 *  @file  rtems/score/percpu.h
 *
 *  This include file defines the per CPU information required
 *  by RTEMS.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_PERCPU_H
#define _RTEMS_PERCPU_H

#include <rtems/score/cpu.h>

#if defined( ASM )
  #include <rtems/asm.h>
#else
  #include <rtems/score/assert.h>
  #include <rtems/score/isrlevel.h>
  #include <rtems/score/smp.h>
  #include <rtems/score/smplock.h>
  #include <rtems/score/timestamp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#if defined( RTEMS_SMP )
  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if defined( RTEMS_PROFILING )
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
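
/*
 * A minimal sketch of how the power of two size above lets code locate the
 * per-CPU control of a particular processor with a shift and add.  This is
 * illustrative only; it assumes the C view of the _Per_CPU_Information table
 * declared below, and assembler code would use an equivalent shift and add:
 *
 * @code
 * Per_CPU_Control *control = (Per_CPU_Control *)
 *   ( (uintptr_t) _Per_CPU_Information
 *     + ( (uintptr_t) processor_index << PER_CPU_CONTROL_SIZE_LOG2 ) );
 * @endcode
 */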

#if !defined( ASM )

#ifndef __THREAD_CONTROL_DEFINED__
#define __THREAD_CONTROL_DEFINED__
typedef struct Thread_Control_struct Thread_Control;
#endif

/**
 *  @defgroup PerCPU RTEMS Per CPU Information
 *
 *  @ingroup Score
 *
 *  This defines the per CPU state information required by RTEMS
 *  and the BSP.  In an SMP configuration, there will be multiple
 *  instances of this data structure -- one per CPU -- and the
 *  current CPU number will be used as the index.
 */

/**@{*/

#if defined( RTEMS_SMP )

#if CPU_USE_DEFERRED_FP_SWITCH == TRUE
  #error "deferred FP switch not implemented for SMP"
#endif

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify another processor about
 * a state change, provided that processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization, one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;
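
/*
 * A hedged sketch of the secondary processor start sequence implied by the
 * state diagram above.  The do_basic_initialization() helper is hypothetical;
 * only _Per_CPU_State_change() is declared in this file:
 *
 * @code
 * void secondary_processor_start( Per_CPU_Control *cpu_self )
 * {
 *   // Basic initialization with interrupt delivery still disabled.
 *   do_basic_initialization();
 *
 *   // Announce readiness.  The boot processor waits for all secondary
 *   // processors to reach this state before it requests the multitasking
 *   // start.  On a shutdown request this call does not return.
 *   _Per_CPU_State_change(
 *     cpu_self,
 *     PER_CPU_STATE_READY_TO_START_MULTITASKING
 *   );
 * }
 * @endcode
 */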

#endif /* defined( RTEMS_SMP ) */

/**
 * @brief Per-CPU statistics.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
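
/*
 * A minimal sketch of deriving the documented averages from a statistics
 * snapshot.  This is illustrative only and meaningful under RTEMS_PROFILING;
 * the caller must guard against division by zero while no event has been
 * recorded yet:
 *
 * @code
 * uint64_t avg_disabled_time = stats->thread_dispatch_disabled_count != 0 ?
 *   stats->total_thread_dispatch_disabled_time
 *     / stats->thread_dispatch_disabled_count : 0;
 * uint64_t avg_interrupt_time = stats->interrupt_count != 0 ?
 *   stats->total_interrupt_time / stats->interrupt_count : 0;
 * @endcode
 */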

/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning of an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /** This is set to true when this CPU needs to run the dispatcher. */
  volatile bool dispatch_necessary;

  /** This is the thread executing on this CPU. */
  Thread_Control *executing;

  /** This is the heir thread for this CPU. */
  Thread_Control *heir;

  /** This is the time of the last context switch on this CPU. */
  Timestamp_Control time_of_last_context_switch;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects the dispatch_necessary, executing, heir and
     * message fields.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     */
    SMP_ticket_lock_Control Lock;

    /**
     * @brief Lock statistics context for the per-CPU lock.
     */
    SMP_lock_Stats_context Lock_stats_context;

    /**
     * @brief Context for the Giant lock acquire and release pair of this
     * processor.
     */
    SMP_lock_Context Giant_lock_context;

    /**
     *  This is the request field for inter-processor interrupts.
     *
     *  @note This may become a chain protected by atomic instructions.
     */
    uint32_t message;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is not protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool started;
  #endif

  Per_CPU_Stats Stats;
} Per_CPU_Control;

#if defined( RTEMS_SMP )
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
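
/*
 * An illustrative sketch: on SMP the padding above makes each envelope
 * exactly PER_CPU_CONTROL_SIZE bytes, so a compile time check along these
 * lines could guard the assumption.  RTEMS_STATIC_ASSERT is provided by
 * <rtems/score/basedefs.h>; treat this check as a sketch:
 *
 * @code
 * RTEMS_STATIC_ASSERT(
 *   sizeof( Per_CPU_Control_envelope ) == PER_CPU_CONTROL_SIZE,
 *   PER_CPU_CONTROL_SIZE
 * );
 * @endcode
 */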

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable_without_giant( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Enable_without_giant( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#endif
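
/*
 * A hedged usage sketch for the acquire/release pair above.  The field
 * touched is one of those named in the Lock documentation; the surrounding
 * context is illustrative:
 *
 * @code
 * ISR_Level level;
 * Per_CPU_Control *cpu = _Per_CPU_Get_by_index( some_index );
 *
 * _Per_CPU_ISR_disable_and_acquire( cpu, level );
 * cpu->dispatch_necessary = true;  // update of a field protected by Lock
 * _Per_CPU_Release_and_ISR_enable( cpu, level );
 * @endcode
 */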

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Enable( isr_cookie )
#endif
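
/*
 * Usage sketch (illustrative): a critical section which must exclude every
 * processor acquires all per-CPU locks with interrupts disabled:
 *
 * @code
 * ISR_Level level;
 *
 * _Per_CPU_Acquire_all( level );
 * // ... operate on state spread across all per-CPU controls ...
 * _Per_CPU_Release_all( level );
 * @endcode
 */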

/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif

#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif

static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return &_Per_CPU_Information[ index ].per_cpu;
}

static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
{
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
}

static inline bool _Per_CPU_Is_processor_started(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->started;
#else
  (void) cpu;

  return true;
#endif
}

#if defined( RTEMS_SMP )

static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu )
{
  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) );
}

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for all other processors to enter the ready to start
 * multitasking state with a timeout in microseconds.
 *
 * In case one processor enters the shutdown state, this function does not
 * return and terminates the system with the SMP_FATAL_SHUTDOWN_EARLY fatal SMP
 * error.
 *
 * This function should be called only in _CPU_SMP_Initialize() if required by
 * the CPU port or BSP.
 *
 * @code
 * uint32_t _CPU_SMP_Initialize(uint32_t configured_cpu_count)
 * {
 *   uint32_t cnt = MIN(get_hardware_cpu_count(), configured_cpu_count);
 *   uint32_t timeout = 123456;
 *
 *   do_some_stuff();
 *
 *   return _Per_CPU_State_wait_for_ready_to_start_multitasking(cnt, timeout);
 * }
 * @endcode
 *
 * In case the timeout expires, the count of processors is reduced to reflect
 * the set of processors which is actually available at this point in time.
 *
 * @param[in] processor_count The minimum of the configured count of
 * processors and the processor count offered by the actual hardware.
 * @param[in] timeout_in_us The timeout in microseconds.
 *
 * @return The count of processors available for the application in the system.
 * This value is less than or equal to the processor count.
 */
uint32_t _Per_CPU_State_wait_for_ready_to_start_multitasking(
  uint32_t processor_count,
  uint32_t timeout_in_us
);

#endif /* defined( RTEMS_SMP ) */

/*
 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.
 * Thus, when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \
  _Per_CPU_Get()->executing
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
  _Per_CPU_Get()->time_of_last_context_switch

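/*
 * A usage sketch (illustrative): the shortcuts expand to fields of the
 * current processor, so for example a dispatch check may read
 *
 * @code
 * if ( _ISR_Nest_level == 0 && _Thread_Dispatch_necessary ) {
 *   // a thread dispatch is pending on this processor
 * }
 * @endcode
 */
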
/**@}*/

#endif /* !defined( ASM ) */

#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need to have places in the per CPU table to hold the stack pointers.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
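
/*
 * A hedged sketch of how C code could cross-check the hand maintained
 * offsets above against the structure layout.  RTEMS_STATIC_ASSERT comes
 * from <rtems/score/basedefs.h>; defining _RTEMS_PERCPU_DEFINE_OFFSETS
 * before the include makes the offsets visible to C code:
 *
 * @code
 * #include <stddef.h>
 *
 * #define _RTEMS_PERCPU_DEFINE_OFFSETS
 * #include <rtems/score/percpu.h>
 *
 * RTEMS_STATIC_ASSERT(
 *   offsetof( Per_CPU_Control, isr_nest_level ) == PER_CPU_ISR_NEST_LEVEL,
 *   PER_CPU_ISR_NEST_LEVEL
 * );
 * @endcode
 */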

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */