source: rtems/cpukit/score/include/rtems/score/percpu.h @ d37adfe5

5
Last change on this file since d37adfe5 was d37adfe5, checked in by Sebastian Huber <sebastian.huber@…>, on 03/03/16 at 06:02:03

score: Fix CPU time used by executing threads

The CPU time used of a thread was previously maintained per-processor
mostly during _Thread_Dispatch(). However, on SMP configurations the
actual processor of a thread is difficult to figure out since thread
dispatching is a highly asynchronous process (e.g. via inter-processor
interrupts). Only the intended processor of a thread is known to the
scheduler easily. Do the CPU usage accounting during thread heir
updates in the context of the scheduler operations. Provide the
function _Thread_Get_CPU_time_used() to get the CPU usage of a thread
using proper locks to get a consistent value.

Close #2627.

  • Property mode set to 100644
File size: 20.8 KB
Line 
1/**
2 *  @file  rtems/score/percpu.h
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  The license and distribution terms for this file may be
13 *  found in the file LICENSE in this distribution or at
14 *  http://www.rtems.org/license/LICENSE.
15 */
16
17#ifndef _RTEMS_PERCPU_H
18#define _RTEMS_PERCPU_H
19
20#include <rtems/score/cpu.h>
21
22#if defined( ASM )
23  #include <rtems/asm.h>
24#else
25  #include <rtems/score/assert.h>
26  #include <rtems/score/isrlock.h>
27  #include <rtems/score/smp.h>
28  #include <rtems/score/smplock.h>
29  #include <rtems/score/timestamp.h>
30  #include <rtems/score/watchdog.h>
31#endif
32
33#ifdef __cplusplus
34extern "C" {
35#endif
36
37#if defined( RTEMS_SMP )
38  /*
39   * This ensures that on SMP configurations the individual per-CPU controls
40   * are on different cache lines to prevent false sharing.  This define can be
41   * used in assembler code to easily get the per-CPU control for a particular
42   * processor.
43   */
44  #if defined( RTEMS_PROFILING )
45    #define PER_CPU_CONTROL_SIZE_LOG2 9
46  #else
47    #define PER_CPU_CONTROL_SIZE_LOG2 7
48  #endif
49
50  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
51#endif
52
53#if !defined( ASM )
54
55struct _Thread_Control;
56
57struct Scheduler_Context;
58
59/**
60 *  @defgroup PerCPU RTEMS Per CPU Information
61 *
62 *  @ingroup Score
63 *
64 *  This defines the per CPU state information required by RTEMS
65 *  and the BSP.  In an SMP configuration, there will be multiple
66 *  instances of this data structure -- one per CPU -- and the
67 *  current CPU number will be used as the index.
68 */
69
70/**@{*/
71
#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   *
   * A processor enters this state via _Per_CPU_State_change() once a shutdown
   * was requested (see the state diagram above); every other state may
   * transition here.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */
156
/**
 * @brief Per-CPU statistics.
 *
 * All members are present only in RTEMS_PROFILING configurations; otherwise
 * this structure has no members.  Times are expressed in CPU counter ticks
 * (CPU_Counter_ticks) unless stated otherwise.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
228
/**
 * @brief Per-CPU watchdog header index.
 *
 * These indices select one of the watchdog headers embedded in the per-CPU
 * control (see the Watchdog.Header array in Per_CPU_Control).
 */
typedef enum {
  /**
   * @brief Index for relative per-CPU watchdog header.
   *
   * The reference time point for this header is current ticks value
   * during insert.  Time is measured in clock ticks.
   */
  PER_CPU_WATCHDOG_RELATIVE,

  /**
   * @brief Index for absolute per-CPU watchdog header.
   *
   * The reference time point for this header is the POSIX Epoch.  Time is
   * measured in nanoseconds since POSIX Epoch.
   */
  PER_CPU_WATCHDOG_ABSOLUTE,

  /**
   * @brief Count of per-CPU watchdog headers.
   *
   * Must remain the last enumerator since it sizes the per-CPU watchdog
   * header array.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
254
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 *
 *  NOTE(review): The offsets of the members up to and including
 *  dispatch_necessary are mirrored by the PER_CPU_* offset macros at the end
 *  of this file for use by assembler code.  Do not reorder or resize these
 *  members without updating those macros.
 */
typedef struct Per_CPU_Control {
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;

  #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
      (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
    /**
     * This contains a pointer to the lower range of the interrupt stack for
     * this CPU.  This is the address allocated and freed.
     */
    void  *interrupt_stack_low;

    /**
     * This contains a pointer to the interrupt stack pointer for this CPU.
     * It will be loaded at the beginning on an ISR.
     */
    void  *interrupt_stack_high;
  #endif

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is the thread executing on this processor.
   *
   * This field is not protected by a lock.  The only writer is this processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This field is not protected by a lock.  The only writer after multitasking
   * start is the scheduler owning this processor.  It is assumed that stores
   * to pointers are atomic on all supported SMP architectures.  The CPU port
   * specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This field is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on another processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for relative watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * One header per reference time base; indexed by Per_CPU_Watchdog_index.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some parts of the low-level thread dispatching.
     *
     * We must use a ticket lock here since we cannot transport a local context
     * through the context switch.
     *
     * @see _Thread_Dispatch().
     */
    SMP_ticket_lock_Control Lock;

    #if defined( RTEMS_PROFILING )
      /**
       * @brief Lock statistics for the per-CPU lock.
       */
      SMP_lock_Stats Lock_stats;

      /**
       * @brief Lock statistics context for the per-CPU lock.
       */
      SMP_lock_Stats_context Lock_stats_context;
    #endif

    /**
     * @brief Context for the Giant lock acquire and release pair of this
     * processor.
     */
    SMP_lock_Context Giant_lock_context;

    /**
     * @brief Bit field for SMP messages.
     *
     * This bit field is not protected locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    /**
     * @brief The scheduler context of the scheduler owning this processor.
     */
    const struct Scheduler_Context *scheduler_context;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This field is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Action to be executed by this processor in the
     * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
     *
     * @see _SMP_Before_multitasking_action().
     */
    Atomic_Uintptr before_multitasking_action;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /** @brief Profiling statistics of this processor (empty without RTEMS_PROFILING). */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
444
#if defined( RTEMS_SMP )
/*
 * On SMP configurations each per-CPU control is padded up to
 * PER_CPU_CONTROL_SIZE bytes so that controls of different processors end up
 * on different cache lines (see PER_CPU_CONTROL_SIZE_LOG2 above).
 *
 * NOTE(review): the padding array has zero length in case
 * sizeof( Per_CPU_Control ) equals PER_CPU_CONTROL_SIZE (a GCC extension),
 * and a negative — thus ill-formed — size in case the control outgrew
 * PER_CPU_CONTROL_SIZE, which acts as a crude compile-time size check.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
456
457/**
458 *  @brief Set of Per CPU Core Information
459 *
460 *  This is an array of per CPU core information.
461 */
462extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
463
/*
 * _Per_CPU_Acquire() and _Per_CPU_Release() acquire and release the per-CPU
 * ticket lock of the specified processor.  On uni-processor configurations
 * they degenerate to no-ops which only evaluate the cpu argument.
 *
 * NOTE(review): the Lock_stats and Lock_stats_context members exist only in
 * RTEMS_PROFILING configurations; presumably _SMP_ticket_lock_Acquire() and
 * _SMP_ticket_lock_Release() are macros that discard these arguments
 * otherwise — verify against <rtems/score/smplock.h>.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
  _SMP_ticket_lock_Acquire( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Acquire( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
  _SMP_ticket_lock_Release( \
    &( cpu )->Lock, \
    &( cpu )->Lock_stats_context \
  )
#else
#define _Per_CPU_Release( cpu ) \
  do { \
    (void) ( cpu ); \
  } while ( 0 )
#endif
490
/*
 * Combined interrupt disable and per-CPU lock acquire, and the matching
 * release and interrupt enable.  Interrupts are disabled before the lock is
 * acquired and re-enabled only after the lock is released, so the per-CPU
 * lock is always held with interrupts disabled.  On uni-processor
 * configurations only the interrupt disable/enable remains.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable_without_giant( isr_cookie ); \
    _Per_CPU_Acquire( cpu ); \
  } while ( 0 )
#else
#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
  do { \
    _ISR_Disable( isr_cookie ); \
    (void) ( cpu ); \
  } while ( 0 )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    _Per_CPU_Release( cpu ); \
    _ISR_Enable_without_giant( isr_cookie ); \
  } while ( 0 )
#else
#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
  do { \
    (void) ( cpu ); \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#endif
518
/*
 * _Per_CPU_Acquire_all() disables interrupts and then acquires the per-CPU
 * lock of every processor in ascending processor-index order;
 * _Per_CPU_Release_all() releases all per-CPU locks and re-enables
 * interrupts.  The consistent acquisition order avoids deadlocks between
 * concurrent acquire-all users.  On uni-processor configurations these reduce
 * to a plain interrupt disable/enable.
 */
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    _ISR_Disable( isr_cookie ); \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
    } \
  } while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
  _ISR_Disable( isr_cookie )
#endif

#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
  do { \
    uint32_t ncpus = _SMP_Get_processor_count(); \
    uint32_t cpu; \
    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
      _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
    } \
    _ISR_Enable( isr_cookie ); \
  } while ( 0 )
#endif
548
/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  /* Use the CPU port provided fast path when it exists. */
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  /* Generic path: index the envelope table by the current processor index. */
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
562
#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * With RTEMS_DEBUG enabled this asserts that the caller cannot migrate to
 * another processor, i.e. thread dispatching is disabled or the interrupt
 * level is non-zero.
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_of_caller = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_of_caller->thread_dispatch_disable_level != 0
      || _ISR_Get_level() != 0
  );

  return cpu_of_caller;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
577
578static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
579{
580  return &_Per_CPU_Information[ index ].per_cpu;
581}
582
583static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
584{
585  const Per_CPU_Control_envelope *per_cpu_envelope =
586    ( const Per_CPU_Control_envelope * ) cpu;
587
588  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
589}
590
591static inline struct _Thread_Control *_Per_CPU_Get_executing(
592  const Per_CPU_Control *cpu
593)
594{
595  return cpu->executing;
596}
597
598static inline bool _Per_CPU_Is_processor_online(
599  const Per_CPU_Control *cpu
600)
601{
602#if defined( RTEMS_SMP )
603  return cpu->online;
604#else
605  (void) cpu;
606
607  return true;
608#endif
609}
610
611static inline bool _Per_CPU_Is_boot_processor(
612  const Per_CPU_Control *cpu
613)
614{
615#if defined( RTEMS_SMP )
616  return cpu->boot;
617#else
618  (void) cpu;
619
620  return true;
621#endif
622}
623
624#if defined( RTEMS_SMP )
625
626static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu )
627{
628  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) );
629}
630
/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structures.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the specified processor.
 *
 * All processor state changes must be performed via this function (see
 * Per_CPU_State).  In case a shutdown was requested this function may not
 * return to the caller.
 *
 * @param[in] cpu The per-CPU control of the processor to change.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);
672
673#endif /* defined( RTEMS_SMP ) */
674
/*
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 *
 * These macros provide convenient access to members of the per-CPU control
 * of the current processor.  Since they go through _Per_CPU_Get(), the
 * RTEMS_DEBUG assertions of _Per_CPU_Get() apply on SMP configurations.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \
  _Per_CPU_Get()->executing
#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
693
694/**
695 * @brief Returns the thread control block of the executing thread.
696 *
697 * This function can be called in any context.  On SMP configurations
698 * interrupts are disabled to ensure that the processor index is used
699 * consistently.
700 *
701 * @return The thread control block of the executing thread.
702 */
703RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
704{
705  struct _Thread_Control *executing;
706
707  #if defined( RTEMS_SMP )
708    ISR_Level level;
709
710    _ISR_Disable_without_giant( level );
711  #endif
712
713  executing = _Thread_Executing;
714
715  #if defined( RTEMS_SMP )
716    _ISR_Enable_without_giant( level );
717  #endif
718
719  return executing;
720}
721
722/**@}*/
723
724#endif /* !defined( ASM ) */
725
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  /*
   *  If this CPU target lets RTEMS allocate the interrupt stack, then
   *  we need to have places in the per CPU table to hold them.
   */
  #define PER_CPU_INTERRUPT_STACK_LOW \
    CPU_PER_CPU_CONTROL_SIZE
  #define PER_CPU_INTERRUPT_STACK_HIGH \
    PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
  #define PER_CPU_END_STACK             \
    PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER

  #define INTERRUPT_STACK_LOW \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
  #define INTERRUPT_STACK_HIGH \
    (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
  #define PER_CPU_END_STACK \
    CPU_PER_CPU_CONTROL_SIZE
#endif

/*
 *  These are the offsets of the required elements in the per CPU table.
 *
 *  NOTE(review): These offsets must match the leading member layout of
 *  Per_CPU_Control above (cpu_per_cpu, optional interrupt stack pointers,
 *  isr_nest_level, thread_dispatch_disable_level, executing, heir,
 *  dispatch_necessary).  The literal 4 is the size in bytes of the uint32_t
 *  members isr_nest_level and thread_dispatch_disable_level; presumably no
 *  padding is inserted between these members on supported targets — verify
 *  per CPU port.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
772
773#ifdef __cplusplus
774}
775#endif
776
777#endif
778/* end of include file */
Note: See TracBrowser for help on using the repository browser.