source: rtems/cpukit/include/rtems/score/percpu.h @ b36bf5b

Last change on this file since b36bf5b was b36bf5b, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 29, 2018 at 10:55:28 AM

score: Increase PER_CPU_CONTROL_SIZE_APPROX

Increase the PER_CPU_CONTROL_SIZE_APPROX on 64-bit targets.

Update #3433.

  • Property mode set to 100644
File size: 23.0 KB
Line 
1/**
2 *  @file  rtems/score/percpu.h
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2012, 2016 embedded brains GmbH
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_PERCPU_H
20#define _RTEMS_PERCPU_H
21
22#include <rtems/score/cpuimpl.h>
23
24#if defined( ASM )
25  #include <rtems/asm.h>
26#else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/smplock.h>
32  #include <rtems/score/timestamp.h>
33  #include <rtems/score/watchdog.h>
34#endif
35
36#ifdef __cplusplus
37extern "C" {
38#endif
39
#if defined(RTEMS_SMP)
  /*
   * Conservative estimate of the maximum size of a Per_CPU_Control.  It is
   * used below to select the power of two envelope size.  The estimate
   * depends on the build configuration since the profiling and debug options
   * add members to the structure.
   */
  #if defined(RTEMS_PROFILING)
    #define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
    #define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
  #else
    #define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
  #endif

  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
    #define PER_CPU_CONTROL_SIZE_LOG2 11
  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
    #define PER_CPU_CONTROL_SIZE_LOG2 10
  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  /* Envelope size: the smallest power of two not less than the estimate */
  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
69
70#if !defined( ASM )
71
72struct _Thread_Control;
73
74struct Scheduler_Context;
75
76/**
77 *  @defgroup PerCPU RTEMS Per CPU Information
78 *
79 *  @ingroup Score
80 *
81 *  This defines the per CPU state information required by RTEMS
82 *  and the BSP.  In an SMP configuration, there will be multiple
83 *  instances of this data structure -- one per CPU -- and the
84 *  current CPU number will be used as the index.
85 */
86
87/**@{*/
88
#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   *
   * A shutdown may be requested from every other state, see the state diagram
   * above.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */
173
/**
 * @brief Per-CPU statistics.
 *
 * All members are only present on RTEMS_PROFILING configurations, otherwise
 * this structure is empty.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
245
/**
 * @brief Per-CPU watchdog header index.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   *
   * This must be the last enumerator.  It is used to dimension the watchdog
   * header array in Per_CPU_Control::Watchdog.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
282
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on other processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  It is assumed that stores
   * to pointers are atomic on all supported SMP architectures.  The CPU port
   * specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame area, see
   * PER_CPU_INTERRUPT_FRAME_AREA.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    #if defined( RTEMS_PROFILING )
      /**
       * @brief Lock statistics for the per-CPU lock.
       */
      SMP_lock_Stats Lock_stats;

      /**
       * @brief Lock statistics context for the per-CPU lock.
       */
      SMP_lock_Stats_context Lock_stats_context;
    #endif

    /**
     * @brief Chain of threads in need for help.
     *
     * This field is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    /**
     * @brief Scheduler ownership information for this processor.
     */
    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Action to be executed by this processor in the
     * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
     *
     * @see _SMP_Before_multitasking_action().
     */
    Atomic_Uintptr before_multitasking_action;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /**
   * @brief Statistics of this processor, see Per_CPU_Stats.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
511
#if defined( RTEMS_SMP )
/*
 * On SMP configurations the envelope pads each per-CPU control up to
 * PER_CPU_CONTROL_SIZE bytes so that the controls of different processors
 * reside in distinct cache lines, see PER_CPU_CONTROL_SIZE_LOG2.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
530
/*
 * Acquires the per-CPU lock of the specified processor.  On non-SMP
 * configurations this is a no-op which only evaluates the argument.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

/*
 * Releases the per-CPU lock of the specified processor.  On non-SMP
 * configurations this is a no-op which only evaluates the argument.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif
557
/*
 * Disables interrupts on the current processor and acquires the per-CPU lock
 * of the specified processor.  On non-SMP configurations only interrupts are
 * disabled.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

/*
 * Releases the per-CPU lock of the specified processor and restores the
 * interrupt status saved in the cookie.  On non-SMP configurations only the
 * interrupt status is restored.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#endif
585
/*
 * Disables interrupts on the current processor and acquires the per-CPU locks
 * of all processors reported by _SMP_Get_processor_count().  On non-SMP
 * configurations only interrupts are disabled.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Local_disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Local_disable( isr_cookie )
#endif

/*
 * Releases the per-CPU locks of all processors and restores the interrupt
 * status saved in the cookie.  Locks are released in the same index order in
 * which _Per_CPU_Acquire_all() acquired them.  On non-SMP configurations only
 * the interrupt status is restored.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Local_enable( isr_cookie )
#endif
615
/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  /* The CPU port provides a dedicated way to get the per-CPU control */
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
629
#if defined( RTEMS_SMP )
/*
 * Returns the per-CPU control of the current processor.  The assertion
 * checks that the caller cannot be migrated to another processor: either
 * thread dispatching or interrupts must be disabled, see the comment above
 * _Per_CPU_Get_snapshot().
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
644
645static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
646{
647  return &_Per_CPU_Information[ index ].per_cpu;
648}
649
650static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
651{
652  const Per_CPU_Control_envelope *per_cpu_envelope =
653    ( const Per_CPU_Control_envelope * ) cpu;
654
655  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
656}
657
/**
 * @brief Returns the thread registered as executing on the specified
 * processor, see Per_CPU_Control::executing.
 */
static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}
664
/**
 * @brief Indicates if the specified processor has been successfully started.
 *
 * On non-SMP configurations the single processor is always online.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}
677
/**
 * @brief Indicates if the specified processor performed the initial system
 * initialization.
 *
 * On non-SMP configurations the single processor is always the boot
 * processor.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}
690
#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the specified processor.
 *
 * All processor state changes must be initiated via this function.  It may
 * not return in case a shutdown was requested, see Per_CPU_State.
 *
 * @param[in] cpu The processor control.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */
736
/*
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */

/* Thread dispatch disable level of the current processor */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level

/* Heir thread of the current processor */
#define _Thread_Heir \
  _Per_CPU_Get()->heir

/*
 * Executing thread of the current processor.  A CPU port may provide its own
 * method to obtain it via _CPU_Get_thread_executing().
 */
#if defined(_CPU_Get_thread_executing)
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

/* Interrupt nest level of the current processor */
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level

/* Interrupt stack bounds of the current processor */
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high

/* Thread dispatch necessary indicator of the current processor */
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
762
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    _ISR_Local_disable( level );
  #endif

  /* Reads the executing thread of the current processor, see
     _Thread_Executing */
  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
791
792/**@}*/
793
794#endif /* !defined( ASM ) */
795
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of Per_CPU_Control members for use by assembler code.  They
 * must be kept consistent with the member order and types of Per_CPU_Control
 * above; the literal 4 is the size of the 32-bit members.
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

/* Addresses of the interrupt stack bounds of the first per-CPU control */
#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
/* dispatch_necessary plus reserved_for_executing_alignment occupy 4 bytes */
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Addresses of the corresponding members of the first per-CPU control */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
836
837#ifdef __cplusplus
838}
839#endif
840
841#endif
842/* end of include file */
Note: See TracBrowser for help on using the repository browser.