source: rtems/cpukit/include/rtems/score/percpu.h @ dda1922

Last change on this file since dda1922 was dda1922, checked in by Sebastian Huber <sebastian.huber@…>, on May 10, 2019 at 7:09:40 AM

score: Adjust PER_CPU_CONTROL_SIZE_APPROX

Account for recent Per_CPU_Control structure member additions.

  • Property mode set to 100644
File size: 23.3 KB
Line 
1/**
2 *  @file
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2012, 2018 embedded brains GmbH
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_PERCPU_H
20#define _RTEMS_PERCPU_H
21
22#include <rtems/score/cpuimpl.h>
23
24#if defined( ASM )
25  #include <rtems/asm.h>
26#else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/timestamp.h>
32  #include <rtems/score/watchdog.h>
33#endif
34
35#ifdef __cplusplus
36extern "C" {
37#endif
38
#if defined(RTEMS_SMP)
  /*
   * Approximation of sizeof( Per_CPU_Control ) usable in preprocessor
   * arithmetic (sizeof cannot appear in #if expressions).  The base
   * constants are maintained by hand and must be adjusted when members are
   * added to Per_CPU_Control.  Profiling builds are largest because of the
   * Per_CPU_Stats members; RTEMS_DEBUG and 64-bit pointer targets get the
   * middle value (presumably because debug-enabled ISR locks and wider
   * pointers enlarge the structure -- confirm against isrlock.h).
   */
  #if defined(RTEMS_PROFILING)
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 512 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 256 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #else
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 180 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #endif

  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
    #define PER_CPU_CONTROL_SIZE_LOG2 11
  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
    #define PER_CPU_CONTROL_SIZE_LOG2 10
  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  /* Power-of-two size of one per-CPU control slot, see the comment above. */
  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
71
72#if !defined( ASM )
73
74struct Record_Control;
75
76struct _Thread_Control;
77
78struct Scheduler_Context;
79
80struct Per_CPU_Job;
81
82/**
83 *  @defgroup PerCPU RTEMS Per CPU Information
84 *
85 *  @ingroup RTEMSScore
86 *
87 *  This defines the per CPU state information required by RTEMS
88 *  and the BSP.  In an SMP configuration, there will be multiple
89 *  instances of this data structure -- one per CPU -- and the
90 *  current CPU number will be used as the index.
91 */
92
93/**@{*/
94
#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   *
   * Reachable from every other state, see the state diagram above.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */
179
/**
 * @brief Per-CPU statistics.
 *
 * All members are present only if RTEMS_PROFILING is enabled; otherwise the
 * structure contains no members.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
251
/**
 * @brief Per-CPU watchdog header index.
 *
 * Used to index Per_CPU_Control::Watchdog::Header.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   *
   * This is a count sentinel, not a valid index.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
288
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 *
 *  The layout of the leading members is mirrored by the PER_CPU_* offset
 *  macros at the end of this file for use in assembler code; keep both in
 *  sync.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This member is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on another processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This member is not protected by a lock.  The only writer is this
   * processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This member is not protected by a lock.  The only writer after
   * multitasking start is the scheduler owning this processor.  It is assumed
   * that stores to pointers are atomic on all supported SMP architectures.
   * The CPU port specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame area, see
   * PER_CPU_INTERRUPT_FRAME_AREA.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some members of this structure.
     */
    ISR_lock_Control Lock;

    /**
     * @brief Lock context used to acquire all per-CPU locks.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     *
     * @see _Per_CPU_Acquire_all().
     */
    ISR_lock_Context Lock_context;

    /**
     * @brief Chain of threads in need for help.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This member is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Begin of the per-CPU data area.
     *
     * Contains items defined via PER_CPU_DATA_ITEM().
     */
    char *data;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This member is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief FIFO list of jobs to be performed by this processor.
     *
     * @see _SMP_Multicast_action().
     */
    struct {
      /**
       * @brief Lock to protect the FIFO list of jobs to be performed by this
       * processor.
       */
      ISR_lock_Control Lock;

      /**
       * @brief Head of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job *head;

      /**
       * @brief Tail of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is only valid if the head is not @c NULL.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job **tail;
    } Jobs;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /**
   * @brief Record control for this processor.
   *
   * NOTE(review): presumably the event record buffer of the rtems/record.h
   * infrastructure -- confirm against that header.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics, see Per_CPU_Stats.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
542
#if defined( RTEMS_SMP )
/**
 * @brief Pads the per-CPU control up to PER_CPU_CONTROL_SIZE bytes.
 *
 * On SMP configurations this places each per-CPU control in its own
 * power-of-two sized slot to prevent false sharing, see
 * PER_CPU_CONTROL_SIZE_LOG2.  If Per_CPU_Control outgrows
 * PER_CPU_CONTROL_SIZE, the padding array size becomes negative and the
 * compilation fails.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief Envelope of the per-CPU control; no padding is needed on
 * uniprocessor configurations.
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
554
/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

/* Acquires the per-CPU lock, see Per_CPU_Control::Lock. */
#define _Per_CPU_Acquire( cpu, lock_context ) \
  _ISR_lock_Acquire( &( cpu )->Lock, lock_context )

/* Releases the per-CPU lock, see Per_CPU_Control::Lock. */
#define _Per_CPU_Release( cpu, lock_context ) \
  _ISR_lock_Release( &( cpu )->Lock, lock_context )

/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  /* The CPU port provides its own way to obtain the current per-CPU control. */
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
581
#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * The assertion checks that the caller cannot migrate to another processor
 * after the snapshot was taken: either thread dispatching is disabled or the
 * caller runs with a non-zero ISR level.  See the comment above
 * _Per_CPU_Get_snapshot() for the rationale.
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
/* On uniprocessor configurations the snapshot cannot become stale. */
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
596
597static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
598{
599  return &_Per_CPU_Information[ index ].per_cpu;
600}
601
602static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
603{
604  const Per_CPU_Control_envelope *per_cpu_envelope =
605    ( const Per_CPU_Control_envelope * ) cpu;
606
607  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
608}
609
/**
 * @brief Returns the thread executing on the specified processor.
 *
 * No synchronization is performed, see Per_CPU_Control::executing for the
 * access rules.
 */
static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}
616
/**
 * @brief Indicates if the specified processor is online.
 *
 * On uniprocessor configurations the sole processor is always online.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}
629
/**
 * @brief Indicates if the specified processor is the boot processor.
 *
 * On uniprocessor configurations the sole processor is always the boot
 * processor.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}
642
/**
 * @brief Disables interrupts and acquires the lock of every per-CPU control.
 *
 * The locks are acquired in ascending processor index order.  The lock
 * context for each acquire after the first is stored in the previously
 * acquired per-CPU control, see Per_CPU_Control::Lock_context.
 * _Per_CPU_Release_all() must release in the opposite order.
 *
 * @param lock_context The ISR lock context used to disable interrupts and to
 * acquire the lock of processor index zero.
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
     Per_CPU_Control *cpu;

     cpu = _Per_CPU_Get_by_index( cpu_index );
     _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
     previous_cpu = cpu;
  }
#else
  /* No per-CPU locks on uniprocessor configurations. */
  _ISR_lock_ISR_disable( lock_context );
#endif
}
669
/**
 * @brief Releases the lock of every per-CPU control and enables interrupts.
 *
 * This is the inverse of _Per_CPU_Acquire_all(): the locks are released in
 * descending processor index order, using the lock context stored in the
 * per-CPU control with the next lower index, and the lock of processor index
 * zero is released last with the caller-supplied lock context.
 *
 * @param lock_context The ISR lock context used by the corresponding
 * _Per_CPU_Acquire_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
     Per_CPU_Control *previous_cpu;

     previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
     _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
     cpu = previous_cpu;
  }

  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  /* No per-CPU locks on uniprocessor configurations. */
  _ISR_lock_ISR_enable( lock_context );
#endif
}
696
#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initialize the per CPU structure.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the specified processor.
 *
 * This function may not return in case someone requested a shutdown, see
 * Per_CPU_State.
 *
 * @param[in, out] cpu The per-CPU control of the processor.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

/**
 * @brief Performs the jobs of the specified processor.
 *
 * @param[in, out] cpu The jobs of this processor will be performed.
 */
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );

#endif /* defined( RTEMS_SMP ) */
749
/*
 * Convenience accessors for members of the per-CPU control of the current
 * processor.  Each use evaluates _Per_CPU_Get(), so on SMP configurations the
 * RTEMS_DEBUG checks of _Per_CPU_Get() apply to every use.
 *
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir

#if defined(_CPU_Get_thread_executing)
/* The CPU port provides its own way to obtain the executing thread. */
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
775
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    /*
     * Disable interrupts so that the current processor cannot change between
     * obtaining the per-CPU control and reading its executing member.
     */
    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
804
805/**@}*/
806
807#endif /* !defined( ASM ) */
808
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of the leading Per_CPU_Control members for use in assembler
 * code.  They mirror the member layout of Per_CPU_Control (cpu_per_cpu,
 * interrupt_stack_low, interrupt_stack_high, isr_nest_level,
 * isr_dispatch_disable, thread_dispatch_disable_level, dispatch_necessary
 * plus its 3-byte alignment padding, executing, heir, Interrupt_frame) and
 * must be kept in sync with the structure definition.  The literal 4 is the
 * size of the uint32_t members (and of dispatch_necessary including its
 * padding, see reserved_for_executing_alignment).
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Absolute addresses of members of processor index zero. */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
849
850#ifdef __cplusplus
851}
852#endif
853
854#endif
855/* end of include file */
Note: See TracBrowser for help on using the repository browser.