source: rtems/cpukit/include/rtems/score/percpu.h @ ad87de4

5
Last change on this file since ad87de4 was ad87de4, checked in by Sebastian Huber <sebastian.huber@…>, on 04/11/19 at 06:54:29

score: Rename _SMP_Get_processor_count()

Rename _SMP_Get_processor_count() to _SMP_Get_processor_maximum() to be
in line with the API level rtems_scheduler_get_processor_maximum().

Update #3732.

  • Property mode set to 100644
File size: 23.2 KB
Line 
1/**
2 *  @file
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2012, 2018 embedded brains GmbH
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_PERCPU_H
20#define _RTEMS_PERCPU_H
21
22#include <rtems/score/cpuimpl.h>
23
24#if defined( ASM )
25  #include <rtems/asm.h>
26#else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/smplock.h>
32  #include <rtems/score/timestamp.h>
33  #include <rtems/score/watchdog.h>
34#endif
35
36#ifdef __cplusplus
37extern "C" {
38#endif
39
#if defined(RTEMS_SMP)
  /*
   * Compile-time upper bound estimate of sizeof( Per_CPU_Control ).
   * Profiling builds add the lock statistics and Per_CPU_Stats members and
   * debug or 64-bit builds enlarge several members, so the estimate grows
   * accordingly.  CPU_INTERRUPT_FRAME_SIZE accounts for the embedded
   * CPU_Interrupt_frame member.
   */
  #if defined(RTEMS_PROFILING)
    #define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
    #define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
  #else
    #define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
  #endif

  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
    #define PER_CPU_CONTROL_SIZE_LOG2 11
  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
    #define PER_CPU_CONTROL_SIZE_LOG2 10
  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  /* Power-of-two size of one Per_CPU_Control_envelope, see below. */
  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
69
70#if !defined( ASM )
71
72struct Record_Control;
73
74struct _Thread_Control;
75
76struct Scheduler_Context;
77
78/**
79 *  @defgroup PerCPU RTEMS Per CPU Information
80 *
81 *  @ingroup RTEMSScore
82 *
83 *  This defines the per CPU state information required by RTEMS
84 *  and the BSP.  In an SMP configuration, there will be multiple
85 *  instances of this data structure -- one per CPU -- and the
86 *  current CPU number will be used as the index.
87 */
88
89/**@{*/
90
#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * Every state may transition directly to PER_CPU_STATE_SHUTDOWN, which is
 * the terminal state.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */
175
/**
 * @brief Per-CPU statistics.
 *
 * All members are present only in profiling builds (RTEMS_PROFILING); in
 * other builds this structure is empty.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
247
/**
 * @brief Per-CPU watchdog header index.
 *
 * Indexes the Watchdog.Header array in Per_CPU_Control; one header per
 * clock discipline.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
284
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on other processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  It is assumed that stores
   * to pointers are atomic on all supported SMP architectures.  The CPU port
   * specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief Interrupt frame area of this processor.
   *
   * NOTE(review): presumably used by the CPU port low-level interrupt code,
   * see PER_CPU_INTERRUPT_FRAME_AREA below -- confirm against the CPU port.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    #if defined( RTEMS_PROFILING )
      /**
       * @brief Lock statistics for the per-CPU lock.
       */
      SMP_lock_Stats Lock_stats;

      /**
       * @brief Lock statistics context for the per-CPU lock.
       */
      SMP_lock_Stats_context Lock_stats_context;
    #endif

    /**
     * @brief Chain of threads in need for help.
     *
     * This field is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    /**
     * @brief Scheduler ownership information for this processor.
     */
    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Begin of the per-CPU data area.
     *
     * Contains items defined via PER_CPU_DATA_ITEM().
     */
    char *data;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Action to be executed by this processor in the
     * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
     *
     * @see _SMP_Before_multitasking_action().
     */
    Atomic_Uintptr before_multitasking_action;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /**
   * @brief Pointer to the record control for this processor.
   *
   * NOTE(review): struct Record_Control is only forward declared here;
   * presumably an event record buffer -- confirm against its defining header.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics, see Per_CPU_Stats.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
522
/*
 * On SMP configurations the envelope pads each Per_CPU_Control up to
 * PER_CPU_CONTROL_SIZE so that the controls of different processors reside
 * on different cache lines (see PER_CPU_CONTROL_SIZE_LOG2 above).  On
 * uniprocessor configurations no padding is necessary.
 */
#if defined( RTEMS_SMP )
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
534
/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.  There is one entry per
 *  processor and the array is indexed by the processor index, see
 *  _Per_CPU_Get_by_index().
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
541
/*
 * Acquires the per-CPU lock of the given processor.  On SMP configurations
 * this takes the per-CPU ticket lock; on uniprocessor configurations it is a
 * no-op which only evaluates its argument.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

/*
 * Releases the per-CPU lock previously acquired via _Per_CPU_Acquire().
 * No-op on uniprocessor configurations.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif
568
/*
 * Disables interrupts on the current processor and then acquires the per-CPU
 * lock of the given processor.  On uniprocessor configurations only the
 * interrupt disable remains.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

/*
 * Releases the per-CPU lock of the given processor and then restores the
 * interrupt state saved in isr_cookie.  Inverse of
 * _Per_CPU_ISR_disable_and_acquire().
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#endif
596
/*
 * Disables interrupts on the current processor and acquires the per-CPU
 * locks of all processors in ascending index order.  On uniprocessor
 * configurations this reduces to the interrupt disable alone.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_maximum(); \
    uint32_t cpu; \
    _ISR_Local_disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Local_disable( isr_cookie )
#endif

/*
 * Releases the per-CPU locks of all processors (same order as acquired by
 * _Per_CPU_Acquire_all()) and then restores the saved interrupt state.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_maximum(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Local_enable( isr_cookie )
#endif
626
/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif

#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * In SMP debug builds this asserts that the caller runs with thread
 * dispatching disabled or from interrupt context, i.e. in a context in which
 * the snapshot cannot become stale due to a migration.
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
655
656static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
657{
658  return &_Per_CPU_Information[ index ].per_cpu;
659}
660
661static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
662{
663  const Per_CPU_Control_envelope *per_cpu_envelope =
664    ( const Per_CPU_Control_envelope * ) cpu;
665
666  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
667}
668
669static inline struct _Thread_Control *_Per_CPU_Get_executing(
670  const Per_CPU_Control *cpu
671)
672{
673  return cpu->executing;
674}
675
/**
 * @brief Indicates if the processor has been successfully started, see the
 * Per_CPU_Control::online member.
 *
 * On uniprocessor configurations the single processor is always online.
 *
 * @param[in] cpu The per-CPU control.
 *
 * @retval true The processor is online.
 * @retval false Otherwise.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}
688
/**
 * @brief Indicates if this is the processor that performed the initial system
 * initialization, see the Per_CPU_Control::boot member.
 *
 * On uniprocessor configurations the single processor is the boot processor.
 *
 * @param[in] cpu The per-CPU control.
 *
 * @retval true This is the boot processor.
 * @retval false Otherwise.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}
701
#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structure.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the processor.
 *
 * All processor state changes must go through this function, see
 * Per_CPU_State.  It may not return in case someone requested a shutdown.
 *
 * @param[in] cpu The per-CPU control of the processor.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */
747
/*
 * Convenience accessors for members of the current processor's per-CPU
 * control.
 *
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */

/* Thread dispatch disable level of the current processor. */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level

/* Heir thread of the current processor. */
#define _Thread_Heir \
  _Per_CPU_Get()->heir

/*
 * Executing thread of the current processor.  A CPU port may provide a
 * faster dedicated mechanism via _CPU_Get_thread_executing().
 */
#if defined(_CPU_Get_thread_executing)
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

/* Interrupt nest level of the current processor. */
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level

/* Interrupt stack bounds of the current processor. */
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high

/* Thread dispatch necessary indicator of the current processor. */
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
773
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    /*
     * Prevent a migration between reading the processor index and reading
     * the executing thread of that processor.
     */
    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
802
803/**@}*/
804
805#endif /* !defined( ASM ) */
806
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of Per_CPU_Control members for use by assembler code.  These
 * offsets must stay in sync with the member order of Per_CPU_Control above
 * (CPU port control, interrupt stack pointers, ISR nest level, ...).
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

/* Interrupt stack bounds within the first (index 0) per-CPU control. */
#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Member addresses within the first (index 0) per-CPU control. */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
847
848#ifdef __cplusplus
849}
850#endif
851
852#endif
853/* end of include file */
Note: See TracBrowser for help on using the repository browser.