source: rtems/cpukit/score/include/rtems/score/percpu.h @ cef5675

Last change: cef5675, checked in by Sebastian Huber <sebastian.huber@…> on 12/14/15 at 10:47:47

Optional POSIX Cleanup initialization

Update #2408.

/**
 *  @file  rtems/score/percpu.h
 *
 *  This include file defines the per CPU information required
 *  by RTEMS.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_PERCPU_H
#define _RTEMS_PERCPU_H

#include <rtems/score/cpu.h>

#if defined( ASM )
  #include <rtems/asm.h>
#else
  #include <rtems/score/assert.h>
  #include <rtems/score/isrlevel.h>
  #include <rtems/score/smp.h>
  #include <rtems/score/smplock.h>
  #include <rtems/score/timestamp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#if defined( RTEMS_SMP )
  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if defined( RTEMS_PROFILING )
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
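
/*
 * Illustrative sketch (not part of the original header): because the per-CPU
 * controls are padded to a power-of-two size, the control of processor n is
 * reachable with a single shift and add, which is what assembler code can
 * exploit:
 *
 *   Per_CPU_Control *cpu = (Per_CPU_Control *)
 *     ( (uintptr_t) _Per_CPU_Information
 *       + ( (uintptr_t) n << PER_CPU_CONTROL_SIZE_LOG2 ) );
 */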

#if !defined( ASM )

struct _Thread_Control;

struct Scheduler_Context;

/**
 *  @defgroup PerCPU RTEMS Per CPU Information
 *
 *  @ingroup Score
 *
 *  This defines the per CPU state information required by RTEMS
 *  and the BSP.  In an SMP configuration, there will be multiple
 *  instances of this data structure -- one per CPU -- and the
 *  current CPU number will be used as the index.
 */

/**@{*/

#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return if a shutdown was requested.  The _SMP_Send_message()
 * function is used to notify another processor about a state change if that
 * processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization, one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary processors.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor has performed its basic initialization and is
   * ready to receive inter-processor interrupts.  Interrupt delivery must be
   * disabled in this state, but requested inter-processor interrupts must be
   * recorded and must be delivered once the secondary processor enables
   * interrupts for the first time.  The boot processor will wait for all
   * secondary processors to change into this state.  In case a secondary
   * processor does not reach this state, the system will not start.  The
   * secondary processors then wait for a change into the
   * PER_CPU_STATE_REQUEST_START_MULTITASKING state, which is set by the boot
   * processor once all secondary processors have reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of the processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */

/**
 * @brief Per-CPU statistics.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
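
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the average times described above follow directly from the totals and
 * counts, e.g. for disabled thread dispatching (fields exist only with
 * RTEMS_PROFILING):
 *
 *   static inline uint64_t _Per_CPU_Stats_average_disabled_time(
 *     const Per_CPU_Stats *stats
 *   )
 *   {
 *     uint64_t count = stats->thread_dispatch_disabled_count;
 *
 *     return count != 0 ?
 *       stats->total_thread_dispatch_disabled_time / count : 0;
 *   }
 */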

/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address that is allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning of an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  It is assumed that stores
   * to pointers are atomic on all supported SMP architectures.  The CPU port
   * specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be an heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on other processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /** This is the time of the last context switch on this CPU. */
  Timestamp_Control time_of_last_context_switch;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    #if defined( RTEMS_PROFILING )
      /**
       * @brief Lock statistics for the per-CPU lock.
       */
      SMP_lock_Stats Lock_stats;

      /**
       * @brief Lock statistics context for the per-CPU lock.
       */
      SMP_lock_Stats_context Lock_stats_context;
    #endif

    /**
     * @brief Context for the Giant lock acquire and release pair of this
     * processor.
     */
    SMP_lock_Context Giant_lock_context;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    /**
     * @brief The scheduler context of the scheduler owning this processor.
     */
    const struct Scheduler_Context *scheduler_context;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool started;
  #endif

  Per_CPU_Stats Stats;
} Per_CPU_Control;

#if defined( RTEMS_SMP )
/*
 * The envelope pads each per-CPU control to PER_CPU_CONTROL_SIZE so that
 * the controls of different processors do not share cache lines.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable_without_giant( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Enable_without_giant( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#endif
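
/*
 * Illustrative usage sketch (not part of the original header): the
 * acquire/release pairs above are meant to bracket short per-CPU critical
 * sections, e.g.
 *
 *   ISR_Level level;
 *
 *   _Per_CPU_ISR_disable_and_acquire( cpu, level );
 *   ... access state protected by the per-CPU lock ...
 *   _Per_CPU_Release_and_ISR_enable( cpu, level );
 */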

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Enable( isr_cookie )
#endif
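
/*
 * Illustrative usage sketch (not part of the original header): sections
 * which must exclude every processor can take all per-CPU locks at once:
 *
 *   ISR_Level level;
 *
 *   _Per_CPU_Acquire_all( level );
 *   ... update state shared across all processors ...
 *   _Per_CPU_Release_all( level );
 */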

/*
 * If the current processor index is obtained in a context which allows thread
 * dispatching, then the executing thread may already run on another processor
 * right after the read instruction.  There are very few cases in which this
 * makes sense; these places may use _Per_CPU_Get_snapshot().  All other
 * places must use _Per_CPU_Get() so that checks can be added for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif

#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
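
/*
 * Illustrative contrast (not part of the original header): in a context
 * where thread dispatching is enabled, only a snapshot is meaningful, since
 * the executing thread may migrate right after the read:
 *
 *   const Per_CPU_Control *cpu_snapshot = _Per_CPU_Get_snapshot();
 *
 * Everywhere else use _Per_CPU_Get(), so that the assertion above can catch
 * misuse in RTEMS_DEBUG configurations.
 */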

static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return &_Per_CPU_Information[ index ].per_cpu;
}

static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
{
  /*
   * The control is the first member of its envelope, so the cast is valid
   * and the element difference is the processor index.
   */
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
}

static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}

static inline bool _Per_CPU_Is_processor_started(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->started;
#else
  (void) cpu;

  return true;
#endif
}

#if defined( RTEMS_SMP )

static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu )
{
  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) );
}

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);
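
/*
 * Illustrative sketch (not part of the original header): following the state
 * diagram above, a secondary processor reports that it completed its basic
 * initialization via
 *
 *   _Per_CPU_State_change(
 *     cpu_self,
 *     PER_CPU_STATE_READY_TO_START_MULTITASKING
 *   );
 *
 * where cpu_self stands for the per-CPU control of the calling processor.
 */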

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */

/*
 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \
  _Per_CPU_Get()->executing
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
  _Per_CPU_Get()->time_of_last_context_switch
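
/*
 * Illustrative expansion (not part of the original header): the macros above
 * keep the classic uniprocessor names working as per-CPU accesses, e.g.
 *
 *   if ( _Thread_Dispatch_necessary ) { ... }
 *
 * is equivalent to
 *
 *   if ( _Per_CPU_Get()->dispatch_necessary ) { ... }
 */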

/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any context.  On SMP configurations
 * interrupts are disabled to ensure that the processor index is used
 * consistently.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined( RTEMS_SMP )
    ISR_Level level;

    _ISR_Disable_without_giant( level );
  #endif

  executing = _Thread_Executing;

  #if defined( RTEMS_SMP )
    _ISR_Enable_without_giant( level );
  #endif

  return executing;
}

/**@}*/

#endif /* !defined( ASM ) */

#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need to have places in the per CPU table to hold the pointers.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
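
/*
 * Worked example (illustrative assumptions: CPU_PER_CPU_CONTROL_SIZE == 0,
 * CPU_SIZEOF_POINTER == 4, interrupt stack members present):
 *
 *   PER_CPU_INTERRUPT_STACK_LOW            = 0
 *   PER_CPU_INTERRUPT_STACK_HIGH           = 4
 *   PER_CPU_END_STACK                      = 8
 *   PER_CPU_ISR_NEST_LEVEL                 = 8
 *   PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL  = 12
 *   PER_CPU_OFFSET_EXECUTING               = 16
 *   PER_CPU_OFFSET_HEIR                    = 20
 *   PER_CPU_DISPATCH_NEEDED                = 24
 *
 * These offsets must match the layout of Per_CPU_Control above.
 */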

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */