source: rtems/cpukit/score/include/rtems/score/percpu.h @ 8042107a

Last change on this file since 8042107a was 8042107a, checked in by Sebastian Huber <sebastian.huber@…>, on 06/25/15 at 09:28:59

score: Move SMP CPU_USE_DEFERRED_FP_SWITCH check

/**
 *  @file  rtems/score/percpu.h
 *
 *  This include file defines the per CPU information required
 *  by RTEMS.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_PERCPU_H
#define _RTEMS_PERCPU_H

#include <rtems/score/cpu.h>

#if defined( ASM )
  #include <rtems/asm.h>
#else
  #include <rtems/score/assert.h>
  #include <rtems/score/isrlevel.h>
  #include <rtems/score/smp.h>
  #include <rtems/score/smplock.h>
  #include <rtems/score/timestamp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#if defined( RTEMS_SMP )
  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if defined( RTEMS_PROFILING )
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
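
/*
 * A minimal sketch of how assembler code may use PER_CPU_CONTROL_SIZE_LOG2
 * to compute the per-CPU control address for a processor index held in a
 * register, shown for a hypothetical ARM-like target (register names and
 * mnemonics are illustrative only):
 *
 *     lsl  r1, r1, #PER_CPU_CONTROL_SIZE_LOG2   index * PER_CPU_CONTROL_SIZE
 *     ldr  r2, =_Per_CPU_Information
 *     add  r2, r2, r1                           &_Per_CPU_Information[ index ]
 *
 * The power of two size turns the multiplication into a simple shift.
 */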

#if !defined( ASM )

#ifndef __THREAD_CONTROL_DEFINED__
#define __THREAD_CONTROL_DEFINED__
typedef struct Thread_Control_struct Thread_Control;
#endif

struct Scheduler_Context;

/**
 *  @defgroup PerCPU RTEMS Per CPU Information
 *
 *  @ingroup Score
 *
 *  This defines the per CPU state information required by RTEMS
 *  and the BSP.  In an SMP configuration, there will be multiple
 *  instances of this data structure -- one per CPU -- and the
 *  current CPU number will be used as the index.
 */

/**@{*/

#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case a shutdown was requested.  The _SMP_Send_message()
 * function will be used to notify another processor about a state change if
 * that processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors now wait
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;
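
/*
 * Sketch of the announcement a secondary processor makes once its basic
 * initialization is complete (see _Per_CPU_State_change() below; cpu_self
 * stands for the per-CPU control of the announcing processor):
 *
 *     _Per_CPU_State_change(
 *       cpu_self,
 *       PER_CPU_STATE_READY_TO_START_MULTITASKING
 *     );
 */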

#endif /* defined( RTEMS_SMP ) */

/**
 * @brief Per-CPU statistics.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
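
/*
 * A minimal sketch, assuming RTEMS_PROFILING is enabled, of how the average
 * time of disabled thread dispatching described above can be derived from
 * the statistics fields (the function name is illustrative only):
 *
 *     static uint64_t average_disabled_time( const Per_CPU_Stats *stats )
 *     {
 *       if ( stats->thread_dispatch_disabled_count == 0 ) {
 *         return 0;
 *       }
 *
 *       return stats->total_thread_dispatch_disabled_time
 *         / stats->thread_dispatch_disabled_count;
 *     }
 */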

/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning of an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  This processor will set the
   * dispatch necessary indicator to false before it reads the heir.  This
   * field is used in combination with the dispatch necessary indicator.
   *
   * A thread can be an heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  Thread_Control *heir;

  /**
   * @brief This is set to true when this processor needs to run the
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock.  There are two writers after
   * multitasking start.  The scheduler owning this processor sets this
   * indicator to true after it updated the heir field.  This processor sets
   * this indicator to false before it reads the heir.  This field is used in
   * combination with the heir field.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /** This is the time of the last context switch on this CPU. */
  Timestamp_Control time_of_last_context_switch;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    /**
     * @brief Lock statistics context for the per-CPU lock.
     */
    SMP_lock_Stats_context Lock_stats_context;

    /**
     * @brief Context for the Giant lock acquire and release pair of this
     * processor.
     */
    SMP_lock_Context Giant_lock_context;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    /**
     * @brief The scheduler context of the scheduler owning this processor.
     */
    const struct Scheduler_Context *scheduler_context;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool started;
  #endif

  Per_CPU_Stats Stats;
} Per_CPU_Control;

#if defined( RTEMS_SMP )
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
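
/*
 * Note: on SMP configurations the character array pads each envelope to
 * exactly PER_CPU_CONTROL_SIZE bytes, so consecutive envelopes in the
 * _Per_CPU_Information array start on distinct cache lines and false
 * sharing between processors is avoided.
 */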

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable_without_giant( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Enable_without_giant( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#endif
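
/*
 * Sketch of a typical pairing of the two macros above, assuming the caller
 * already ensured a context in which _Per_CPU_Get() is valid (thread
 * dispatching disabled or interrupt context):
 *
 *     ISR_Level        level;
 *     Per_CPU_Control *cpu_self = _Per_CPU_Get();
 *
 *     _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
 *     ... short critical section touching per-CPU state ...
 *     _Per_CPU_Release_and_ISR_enable( cpu_self, level );
 */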

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Enable( isr_cookie )
#endif
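
/*
 * Note: _Per_CPU_Acquire_all() takes the per-CPU locks in ascending
 * processor index order.  Using one consistent acquisition order for all
 * callers is the standard way to prevent deadlocks between concurrent
 * all-lock acquisitions.
 */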

/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
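
/*
 * Sketch of a use where the snapshot is acceptable: reading a value for
 * which a stale processor reference is harmless, e.g. purely informational
 * output.
 *
 *     const Per_CPU_Control *cpu_snapshot = _Per_CPU_Get_snapshot();
 *     uint32_t               nest_level = cpu_snapshot->isr_nest_level;
 *
 * After the first line the executing thread may already have migrated, so
 * cpu_snapshot may refer to a processor the thread no longer runs on.
 */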

#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif

static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return &_Per_CPU_Information[ index ].per_cpu;
}

static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
{
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
}
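
/*
 * The two functions above are inverses of each other: for every valid
 * processor index i
 *
 *     _Per_CPU_Get_index( _Per_CPU_Get_by_index( i ) ) == i
 *
 * holds, since _Per_CPU_Get_index() recovers the index via the pointer
 * difference within the _Per_CPU_Information array.
 */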

static inline bool _Per_CPU_Is_processor_started(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->started;
#else
  (void) cpu;

  return true;
#endif
}

#if defined( RTEMS_SMP )

static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu )
{
  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) );
}

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */

/*
 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.  Thus,
 * when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \
  _Per_CPU_Get()->executing
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
  _Per_CPU_Get()->time_of_last_context_switch
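
/*
 * Sketch of how the convenience macros above are used, e.g. to obtain the
 * thread currently executing on this processor:
 *
 *     Thread_Control *executing = _Thread_Executing;
 *
 * which expands to _Per_CPU_Get()->executing and therefore inherits the
 * context requirements of _Per_CPU_Get().
 */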

/**@}*/

#endif /* !defined( ASM ) */

#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need to have places in the per CPU table to hold them.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
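
/*
 * A hypothetical assembler fragment (ARM-like syntax, shown only as a
 * sketch) that loads the interrupt nest level of processor 0 through the
 * symbol defined above; _Per_CPU_Information is the array base, so these
 * offsets address the first per-CPU control:
 *
 *     ldr  r0, =ISR_NEST_LEVEL
 *     ldr  r0, [r0]
 */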

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */