source: rtems/cpukit/score/include/rtems/score/percpu.h @ 05ca53d

Last change on this file since 05ca53d was 05ca53d, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 12:08:33

rtems: Add scheduler processor add/remove

Update #2797.

Property mode set to 100644
File size: 21.8 KB
/**
 *  @file  rtems/score/percpu.h
 *
 *  This include file defines the per CPU information required
 *  by RTEMS.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_PERCPU_H
#define _RTEMS_PERCPU_H

#include <rtems/score/cpuimpl.h>

#if defined( ASM )
  #include <rtems/asm.h>
#else
  #include <rtems/score/assert.h>
  #include <rtems/score/chain.h>
  #include <rtems/score/isrlock.h>
  #include <rtems/score/smp.h>
  #include <rtems/score/smplock.h>
  #include <rtems/score/timestamp.h>
  #include <rtems/score/watchdog.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#if defined( RTEMS_SMP )
  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if defined( RTEMS_PROFILING )
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif defined( RTEMS_DEBUG )
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif

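/*
 * Consequently, on SMP configurations the per-CPU control of processor n
 * starts at byte offset ( n << PER_CPU_CONTROL_SIZE_LOG2 ) from the start of
 * the _Per_CPU_Information table.  A minimal sketch of this arithmetic in C
 * (the index n is a stand-in; see also _Per_CPU_Get_by_index() below):
 *
 * @code
 * uintptr_t base = (uintptr_t) &_Per_CPU_Information[ 0 ];
 * Per_CPU_Control *cpu =
 *   (Per_CPU_Control *) ( base + ( n << PER_CPU_CONTROL_SIZE_LOG2 ) );
 * @endcode
 */
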
#if !defined( ASM )

struct _Thread_Control;

struct Scheduler_Context;

/**
 *  @defgroup PerCPU RTEMS Per CPU Information
 *
 *  @ingroup Score
 *
 *  This defines the per CPU state information required by RTEMS
 *  and the BSP.  In an SMP configuration, there will be multiple
 *  instances of this data structure -- one per CPU -- and the
 *  current CPU number will be used as the index.
 */

/**@{*/

#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case a shutdown was requested.  The _SMP_Send_message()
 * function is used to notify another processor about state changes, provided
 * that processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization, one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state, the system will not start.  The secondary processors then wait
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state,
   * which is set by the boot processor once all secondary processors have
   * reached the PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

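/*
 * A hedged sketch of how a secondary processor could drive this life cycle
 * once its basic initialization is complete (the actual start-up sequence
 * lives in the SMP support code and is only assumed here):
 *
 * @code
 * Per_CPU_Control *cpu_self = _Per_CPU_Get();
 *
 * // Signal readiness and wait for the boot processor to request the
 * // multitasking start; this call does not return in case of a shutdown.
 * _Per_CPU_State_change( cpu_self, PER_CPU_STATE_READY_TO_START_MULTITASKING );
 * @endcode
 */
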
#endif /* defined( RTEMS_SMP ) */

/**
 * @brief Per-CPU statistics.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The instant in CPU counter ticks at which thread dispatching was
   * disabled.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;

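/*
 * A hedged sketch of how the profiling data above could be evaluated.  The
 * helper name is hypothetical and assumes RTEMS_PROFILING is enabled:
 *
 * @code
 * static uint64_t average_disabled_time( const Per_CPU_Stats *stats )
 * {
 *   if ( stats->thread_dispatch_disabled_count == 0 ) {
 *     return 0;
 *   }
 *
 *   // Average time of disabled thread dispatching in CPU counter ticks.
 *   return stats->total_thread_dispatch_disabled_time
 *     / stats->thread_dispatch_disabled_count;
 * }
 * @endcode
 */
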
/**
 * @brief Per-CPU watchdog header index.
 */
typedef enum {
  /**
   * @brief Index for relative per-CPU watchdog header.
   *
   * The reference time point for this header is the current ticks value
   * during insert.  Time is measured in clock ticks.
   */
  PER_CPU_WATCHDOG_RELATIVE,

  /**
   * @brief Index for absolute per-CPU watchdog header.
   *
   * The reference time point for this header is the POSIX Epoch.  Time is
   * measured in nanoseconds since POSIX Epoch.
   */
  PER_CPU_WATCHDOG_ABSOLUTE,

  /**
   * @brief Count of per-CPU watchdog headers.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;

/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning of an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  It is assumed that stores
   * to pointers are atomic on all supported SMP architectures.  The CPU port
   * specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be an heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on other processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for relative watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    #if defined( RTEMS_PROFILING )
      /**
       * @brief Lock statistics for the per-CPU lock.
       */
      SMP_lock_Stats Lock_stats;

      /**
       * @brief Lock statistics context for the per-CPU lock.
       */
      SMP_lock_Stats_context Lock_stats_context;
    #endif

    /**
     * @brief Chain of threads in need for help.
     *
     * This field is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Action to be executed by this processor in the
     * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
     *
     * @see _SMP_Before_multitasking_action().
     */
    Atomic_Uintptr before_multitasking_action;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  Per_CPU_Stats Stats;
} Per_CPU_Control;

#if defined( RTEMS_SMP )
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif

/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Local_disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#endif

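/*
 * The macros above are meant to be used in pairs.  A minimal usage sketch
 * (illustrative only; the processor index is a stand-in):
 *
 * @code
 * ISR_Level level;
 * Per_CPU_Control *cpu = _Per_CPU_Get_by_index( some_cpu_index );
 *
 * _Per_CPU_ISR_disable_and_acquire( cpu, level );
 * // ... access fields protected by the per-CPU lock ...
 * _Per_CPU_Release_and_ISR_enable( cpu, level );
 * @endcode
 */
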
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Local_disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Local_disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Local_enable( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
  _ISR_Local_enable( isr_cookie )
#endif

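/*
 * A hedged sketch of the all-processors variant, which acquires every per-CPU
 * lock in index order and releases them the same way:
 *
 * @code
 * ISR_Level level;
 *
 * _Per_CPU_Acquire_all( level );
 * // ... operate on state spanning all processors ...
 * _Per_CPU_Release_all( level );
 * @endcode
 */
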
/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif

#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif

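/*
 * _Per_CPU_Get() must be called in a context which prevents migration, for
 * example with thread dispatching disabled.  A hedged sketch, assuming the
 * thread dispatch API provides _Thread_Dispatch_disable() and
 * _Thread_Dispatch_enable():
 *
 * @code
 * Per_CPU_Control *cpu_self = _Thread_Dispatch_disable();
 *
 * // Safe: this context cannot migrate to another processor.
 * uint32_t nest_level = cpu_self->isr_nest_level;
 *
 * _Thread_Dispatch_enable( cpu_self );
 * @endcode
 */
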
static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return &_Per_CPU_Information[ index ].per_cpu;
}

static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
{
  const Per_CPU_Control_envelope *per_cpu_envelope =
    ( const Per_CPU_Control_envelope * ) cpu;

  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
}

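/*
 * The two helpers above are inverses of each other; a minimal sketch:
 *
 * @code
 * uint32_t index = _SMP_Get_current_processor();
 *
 * _Assert( _Per_CPU_Get_index( _Per_CPU_Get_by_index( index ) ) == index );
 * @endcode
 */
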
static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}

static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}

static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}

#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */

/*
 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0.  Thus,
 * when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir

#if defined(_CPU_Get_thread_executing)
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary

/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}

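/*
 * A minimal usage sketch (illustrative only): unlike the _Thread_Executing
 * macro, this accessor is safe in any thread context.
 *
 * @code
 * struct _Thread_Control *executing = _Thread_Get_executing();
 * @endcode
 */
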
/**@}*/

#endif /* !defined( ASM ) */

#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need to have places in the per CPU table to hold them.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

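/*
 * A hedged sketch of how these offsets could be validated against the C
 * structure layout when _RTEMS_PERCPU_DEFINE_OFFSETS is defined (the per-CPU
 * implementation performs similar compile-time checks; offsetof comes from
 * <stddef.h>):
 *
 * @code
 * _Static_assert(
 *   offsetof( Per_CPU_Control, isr_nest_level ) == PER_CPU_ISR_NEST_LEVEL,
 *   "PER_CPU_ISR_NEST_LEVEL"
 * );
 * @endcode
 */
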
#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */