source: rtems/cpukit/include/rtems/score/percpu.h @ f410b31b

5
Last change on this file since f410b31b was e90486a, checked in by Sebastian Huber <sebastian.huber@…>, on 04/11/19 at 13:16:40

score: Rework SMP multicast action

Use a FIFO list of jobs per processor to carry out the SMP multicast
action. Use a done indicator per job to reduce the bus traffic a bit.

  • Property mode set to 100644
File size: 23.4 KB
Line 
1/**
2 *  @file
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2012, 2018 embedded brains GmbH
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_PERCPU_H
20#define _RTEMS_PERCPU_H
21
22#include <rtems/score/cpuimpl.h>
23
24#if defined( ASM )
25  #include <rtems/asm.h>
26#else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/timestamp.h>
32  #include <rtems/score/watchdog.h>
33#endif
34
35#ifdef __cplusplus
36extern "C" {
37#endif
38
#if defined(RTEMS_SMP)
  /*
   * Conservative upper-bound estimate of sizeof( Per_CPU_Control ), used
   * below to select the power-of-two envelope size.  The estimate grows with
   * configurations that add members (profiling, debug) or use wider pointers.
   */
  #if defined(RTEMS_PROFILING)
    #define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
    #define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
  #else
    #define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
  #endif

  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
    #define PER_CPU_CONTROL_SIZE_LOG2 11
  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
    #define PER_CPU_CONTROL_SIZE_LOG2 10
  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  /* Actual envelope size: the smallest power of two above the estimate. */
  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
68
69#if !defined( ASM )
70
71struct Record_Control;
72
73struct _Thread_Control;
74
75struct Scheduler_Context;
76
77struct Per_CPU_Job;
78
79/**
80 *  @defgroup PerCPU RTEMS Per CPU Information
81 *
82 *  @ingroup RTEMSScore
83 *
84 *  This defines the per CPU state information required by RTEMS
85 *  and the BSP.  In an SMP configuration, there will be multiple
86 *  instances of this data structure -- one per CPU -- and the
87 *  current CPU number will be used as the index.
88 */
89
90/**@{*/
91
#if defined( RTEMS_SMP )

/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;

#endif /* defined( RTEMS_SMP ) */
176
/**
 * @brief Per-CPU statistics.
 *
 * All members exist only in RTEMS_PROFILING configurations; otherwise the
 * structure is empty.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
248
/**
 * @brief Per-CPU watchdog header index.
 *
 * Indexes Per_CPU_Control::Watchdog::Header[], one header per clock source.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
285
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 *
 *  NOTE: The member layout up to and including the heir thread pointer is
 *  mirrored by the PER_CPU_* offset defines at the bottom of this file; keep
 *  them in sync when changing this structure.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This member is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on another processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This member is not protected by a lock.  The only writer is this
   * processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This member is not protected by a lock.  The only writer after
   * multitasking start is the scheduler owning this processor.  It is assumed
   * that stores to pointers are atomic on all supported SMP architectures.
   * The CPU port specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame area, see
   * PER_CPU_INTERRUPT_FRAME_AREA.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some members of this structure.
     */
    ISR_lock_Control Lock;

    /**
     * @brief Lock context used to acquire all per-CPU locks.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     *
     * @see _Per_CPU_Acquire_all().
     */
    ISR_lock_Context Lock_context;

    /**
     * @brief Chain of threads in need for help.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This member is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Begin of the per-CPU data area.
     *
     * Contains items defined via PER_CPU_DATA_ITEM().
     */
    char *data;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This member is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief Action to be executed by this processor in the
     * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
     *
     * @see _SMP_Before_multitasking_action().
     */
    Atomic_Uintptr before_multitasking_action;

    /**
     * @brief FIFO list of jobs to be performed by this processor.
     *
     * @see _SMP_Multicast_action().
     */
    struct {
      /**
       * @brief Lock to protect the FIFO list of jobs to be performed by this
       * processor.
       */
      ISR_lock_Control Lock;

      /**
       * @brief Head of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job *head;

      /**
       * @brief Tail of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is only valid if the head is not @c NULL.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job **tail;
    } Jobs;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /**
   * @brief Event record control for this processor.
   *
   * NOTE(review): presumably managed by the record subsystem
   * (struct Record_Control is only forward declared here) — confirm against
   * rtems/record users.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics, see Per_CPU_Stats.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
547
#if defined( RTEMS_SMP )
/**
 * @brief Cache-line aligned envelope for the per-CPU control.
 *
 * The padding member rounds each element of _Per_CPU_Information[] up to
 * PER_CPU_CONTROL_SIZE so distinct processors never share a cache line.  As a
 * side effect the array size expression fails to compile (negative size) if
 * Per_CPU_Control ever grows beyond PER_CPU_CONTROL_SIZE.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief Envelope for the per-CPU control; no padding is needed on
 * uniprocessor configurations.
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
559
/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

/* Acquires the given processor's Per_CPU_Control::Lock lock. */
#define _Per_CPU_Acquire( cpu, lock_context ) \
  _ISR_lock_Acquire( &( cpu )->Lock, lock_context )

/* Releases the given processor's Per_CPU_Control::Lock lock. */
#define _Per_CPU_Release( cpu, lock_context ) \
  _ISR_lock_Release( &( cpu )->Lock, lock_context )

/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  /* The CPU port provides a direct way to read the current per-CPU control. */
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
586
#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * The assertion checks that thread dispatching is disabled or that the
 * interrupt level is non-zero; otherwise the executing thread could migrate
 * to another processor right after the snapshot is taken (see comment above
 * _Per_CPU_Get_snapshot()).
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
/* On uniprocessor configurations the snapshot cannot become stale. */
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
601
602static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
603{
604  return &_Per_CPU_Information[ index ].per_cpu;
605}
606
607static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
608{
609  const Per_CPU_Control_envelope *per_cpu_envelope =
610    ( const Per_CPU_Control_envelope * ) cpu;
611
612  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
613}
614
615static inline struct _Thread_Control *_Per_CPU_Get_executing(
616  const Per_CPU_Control *cpu
617)
618{
619  return cpu->executing;
620}
621
622static inline bool _Per_CPU_Is_processor_online(
623  const Per_CPU_Control *cpu
624)
625{
626#if defined( RTEMS_SMP )
627  return cpu->online;
628#else
629  (void) cpu;
630
631  return true;
632#endif
633}
634
635static inline bool _Per_CPU_Is_boot_processor(
636  const Per_CPU_Control *cpu
637)
638{
639#if defined( RTEMS_SMP )
640  return cpu->boot;
641#else
642  (void) cpu;
643
644  return true;
645#endif
646}
647
/**
 * @brief Disables interrupts and acquires all per-CPU locks in ascending
 * processor index order.
 *
 * The caller's lock context records the ISR state and the acquisition of
 * processor 0's lock; each further acquisition is recorded in the
 * Lock_context member of the previously acquired per-CPU control.  Undo with
 * _Per_CPU_Release_all(), which releases in the exact reverse order.
 *
 * @param[in, out] lock_context The lock context to use for
 * _Per_CPU_Release_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  /* Chain the remaining acquires through each predecessor's Lock_context. */
  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
     Per_CPU_Control *cpu;

     cpu = _Per_CPU_Get_by_index( cpu_index );
     _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
     previous_cpu = cpu;
  }
#else
  /* No per-CPU locks exist on uniprocessor configurations. */
  _ISR_lock_ISR_disable( lock_context );
#endif
}
674
/**
 * @brief Releases all per-CPU locks in descending processor index order and
 * restores the interrupt state.
 *
 * This is the inverse of _Per_CPU_Acquire_all(): each lock is released with
 * the lock context it was acquired with (the predecessor's Lock_context,
 * except for processor 0, which uses the caller's lock context).
 *
 * @param[in, out] lock_context The lock context used for
 * _Per_CPU_Acquire_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
     Per_CPU_Control *previous_cpu;

     previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
     _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
     cpu = previous_cpu;
  }

  /* Processor 0's lock was acquired with the caller's lock context. */
  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  _ISR_lock_ISR_enable( lock_context );
#endif
}
701
#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initialize the per CPU structure.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the specified processor.
 *
 * All processor state changes must be initiated via this function, see
 * Per_CPU_State.  This function may not return in case someone requested a
 * shutdown.
 *
 * @param[in, out] cpu The per-CPU control of the processor.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

/**
 * @brief Performs the jobs of the specified processor.
 *
 * @param[in, out] cpu The jobs of this processor will be performed.
 */
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );

#endif /* defined( RTEMS_SMP ) */
754
/*
 * Convenience accessors for members of the current processor's control.
 *
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir

/* Prefer a CPU port specific accessor for the executing thread if available. */
#if defined(_CPU_Get_thread_executing)
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
780
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  /*
   * Without a port-specific _CPU_Get_thread_executing(), _Thread_Executing
   * expands to a processor-index lookup; disable interrupts so the index and
   * the member read happen on the same processor.
   */
  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
809
810/**@}*/
811
812#endif /* !defined( ASM ) */
813
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of the leading Per_CPU_Control members, for use from assembler
 * code.  They must be kept consistent with the member layout of
 * Per_CPU_Control above (cpu_per_cpu, interrupt_stack_low/high,
 * isr_nest_level, isr_dispatch_disable, thread_dispatch_disable_level,
 * dispatch_necessary plus its alignment padding, executing, heir).
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Addresses of the processor 0 members within _Per_CPU_Information. */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
854
855#ifdef __cplusplus
856}
857#endif
858
859#endif
860/* end of include file */
Note: See TracBrowser for help on using the repository browser.