source: rtems/cpukit/include/rtems/score/percpu.h

Last change on this file was bcef89f2, checked in by Sebastian Huber <sebastian.huber@…>, on 05/19/23 at 06:18:25

Update company name

The embedded brains GmbH & Co. KG is the legal successor of embedded
brains GmbH.

  • Property mode set to 100644
File size: 28.5 KB
Line 
1/* SPDX-License-Identifier: BSD-2-Clause */
2
3/**
4 * @file
5 *
6 * @ingroup RTEMSScorePerCPU
7 *
8 * @brief This header file provides the interfaces of the
9 *   @ref RTEMSScorePerCPU.
10 */
11
12/*
13 *  COPYRIGHT (c) 1989-2011.
14 *  On-Line Applications Research Corporation (OAR).
15 *
16 *  Copyright (C) 2012, 2018 embedded brains GmbH & Co. KG
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 *    notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 *    notice, this list of conditions and the following disclaimer in the
25 *    documentation and/or other materials provided with the distribution.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#ifndef _RTEMS_PERCPU_H
41#define _RTEMS_PERCPU_H
42
43#include <rtems/score/cpuimpl.h>
44
45#if defined( ASM )
46  #include <rtems/asm.h>
47#else
48  #include <rtems/score/assert.h>
49  #include <rtems/score/chain.h>
50  #include <rtems/score/isrlock.h>
51  #include <rtems/score/smp.h>
52  #include <rtems/score/timestamp.h>
53  #include <rtems/score/watchdog.h>
54#endif
55
56#ifdef __cplusplus
57extern "C" {
58#endif
59
60#if defined( RTEMS_SMP )
61  #if defined( RTEMS_PROFILING )
62    #define PER_CPU_CONTROL_SIZE_PROFILING 332
63  #else
64    #define PER_CPU_CONTROL_SIZE_PROFILING 0
65  #endif
66
67  #if defined( RTEMS_DEBUG )
68    #define PER_CPU_CONTROL_SIZE_DEBUG 76
69  #else
70    #define PER_CPU_CONTROL_SIZE_DEBUG 0
71  #endif
72
73  #if CPU_SIZEOF_POINTER > 4
74    #define PER_CPU_CONTROL_SIZE_BIG_POINTER 76
75  #else
76    #define PER_CPU_CONTROL_SIZE_BIG_POINTER 0
77  #endif
78
79  #define PER_CPU_CONTROL_SIZE_BASE 180
80  #define PER_CPU_CONTROL_SIZE_APPROX \
81    ( PER_CPU_CONTROL_SIZE_BASE + CPU_PER_CPU_CONTROL_SIZE + \
82    CPU_INTERRUPT_FRAME_SIZE + PER_CPU_CONTROL_SIZE_PROFILING + \
83    PER_CPU_CONTROL_SIZE_DEBUG + PER_CPU_CONTROL_SIZE_BIG_POINTER )
84
85  /*
86   * This ensures that on SMP configurations the individual per-CPU controls
87   * are on different cache lines to prevent false sharing.  This define can be
88   * used in assembler code to easily get the per-CPU control for a particular
89   * processor.
90   */
91  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
92    #define PER_CPU_CONTROL_SIZE_LOG2 11
93  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
94    #define PER_CPU_CONTROL_SIZE_LOG2 10
95  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
96    #define PER_CPU_CONTROL_SIZE_LOG2 9
97  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
98    #define PER_CPU_CONTROL_SIZE_LOG2 8
99  #else
100    #define PER_CPU_CONTROL_SIZE_LOG2 7
101  #endif
102
103  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
104#endif
105
106#if !defined( ASM )
107
108struct Record_Control;
109
110struct _Thread_Control;
111
112struct Scheduler_Context;
113
114/**
115 * @defgroup RTEMSScorePerCPU Per-CPU Information
116 *
117 * @ingroup RTEMSScore
118 *
119 * @brief This group contains the implementation of the per-CPU information.
120 *
121 * The per-CPU information encapsulates state which is maintained for each
122 * configured processor in the system.  There will be one instance of a
123 * ::Per_CPU_Control in the ::_Per_CPU_Information table for each configured
124 * processor in the system.
125 */
126
127/**@{*/
128
129#if defined( RTEMS_SMP )
130
/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * The state of a processor is indicated by the Per_CPU_Control::state member.
 * The current state of a processor can be obtained by _Per_CPU_Get_state().
 * Only the processor associated with the control may change its state using
 * _Per_CPU_Set_state().
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.  The boot processor uses
 * _SMP_Request_start_multitasking() to indicate that processors should start
 * multitasking.  Secondary processors will wait for this request in
 * _SMP_Start_multitasking_on_secondary_processor().
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_UP state set requested by the boot
   * processor through ::_SMP_Ready_to_start_multitasking once all secondary
   * processors reached the PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;
204
/**
 * @brief Handler invoked on the target processor to carry out a
 *   per-processor job.
 *
 * @param arg is the handler argument provided via Per_CPU_Job_context::arg.
 */
typedef void ( *Per_CPU_Job_handler )( void *arg );

/**
 * @brief Context for per-processor jobs.
 *
 * This is separate from Per_CPU_Job to save stack memory in
 * _SMP_Multicast_action().
 */
typedef struct {
  /**
   * @brief The job handler.
   */
  Per_CPU_Job_handler handler;

  /**
   * @brief The job handler argument.
   */
  void *arg;
} Per_CPU_Job_context;
224
/*
 * Value for the Per_CPU_Job::done member to indicate that a job is done
 * (handler was called on the target processor).  Must not be a valid pointer
 * value since it overlaps with the Per_CPU_Job::next member.
 */
#define PER_CPU_JOB_DONE 1

/**
 * @brief A per-processor job.
 *
 * This structure must be as small as possible due to stack space constraints
 * in _SMP_Multicast_action().
 */
typedef struct Per_CPU_Job {
  /*
   * The list link and the done indication deliberately overlap: once the job
   * was processed it is no longer on a job list, so the storage can be reused
   * for the done marker (see PER_CPU_JOB_DONE).
   */
  union {
    /**
     * @brief The next job in the corresponding per-processor job list.
     */
    struct Per_CPU_Job *next;

    /**
     * @brief Indication if the job is done.
     *
     * A job is done if this member has the value PER_CPU_JOB_DONE.  This
     * assumes that PER_CPU_JOB_DONE is not a valid pointer value.
     */
    Atomic_Ulong done;
  };

  /**
   * @brief Pointer to the job context to get the handler and argument.
   */
  const Per_CPU_Job_context *context;
} Per_CPU_Job;
259
260#endif /* defined( RTEMS_SMP ) */
261
/**
 * @brief Per-CPU statistics.
 *
 * All members are only present in RTEMS_PROFILING configurations; otherwise
 * the structure has no members.
 */

/*
 * This was added to address the following warning emitted by -Wpedantic when
 * RTEMS_PROFILING is disabled and the structure below is empty:
 * warning: struct has no members
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
#pragma GCC diagnostic pop
341
/**
 * @brief Per-CPU watchdog header index.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   *
   * This enumerator sizes Per_CPU_Control::Watchdog::Header and therefore
   * must remain the last entry.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
378
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 */
typedef struct Per_CPU_Control {
  /*
   * NOTE: The layout of the members from interrupt_stack_low up to and
   * including heir (and Interrupt_frame on SMP) is mirrored by the PER_CPU_*
   * offset macros at the bottom of this file (PER_CPU_INTERRUPT_STACK_LOW
   * etc.).  Do not reorder or resize these members without updating the
   * offsets.
   */
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   * @brief This contains the current interrupt nesting level on this
   * CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This member is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on other processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This member is not protected by a lock.  The only writer is this
   * processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This member is not protected by a lock.  The only writer after
   * multitasking start is the scheduler owning this processor.  It is assumed
   * that stores to pointers are atomic on all supported SMP architectures.
   * The CPU port specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame storage, see
   *   PER_CPU_INTERRUPT_FRAME_AREA.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
#if defined(RTEMS_SMP)
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_lock_Control Lock;
#endif

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some members of this structure.
     */
    ISR_lock_Control Lock;

    /**
     * @brief Lock context used to acquire all per-CPU locks.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     *
     * @see _Per_CPU_Acquire_all().
     */
    ISR_lock_Context Lock_context;

    /**
     * @brief Chain of threads in need for help.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This member is not protected by locks.  Atomic operations are used to
     * set and get the message bits.
     */
    Atomic_Ulong message;

    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief The ancestor of the executing thread.
     *
     * This member is used by _User_extensions_Thread_switch().
     */
    struct _Thread_Control *ancestor;

    /**
     * @brief Begin of the per-CPU data area.
     *
     * Contains items defined via PER_CPU_DATA_ITEM().
     */
    char *data;

    /**
     * @brief Indicates the current state of the processor.
     *
     * Only the processor associated with this control is allowed to change
     * this member.
     *
     * @see _Per_CPU_Get_state() and _Per_CPU_Set_state().
     */
    Atomic_Uint state;

    /**
     * @brief FIFO list of jobs to be performed by this processor.
     *
     * @see _SMP_Multicast_action().
     */
    struct {
      /**
       * @brief Lock to protect the FIFO list of jobs to be performed by this
       * processor.
       */
      ISR_lock_Control Lock;

      /**
       * @brief Head of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job *head;

      /**
       * @brief Tail of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is only valid if the head is not @c NULL.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job **tail;
    } Jobs;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /*
   * NOTE(review): presumably the event record control for this processor
   * (see the Record_Control forward declaration above) -- confirm against
   * the record implementation.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics (profiling members only in RTEMS_PROFILING
   *   configurations).
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
642
#if defined( RTEMS_SMP )
/**
 * @brief Envelope padding the per-CPU control to PER_CPU_CONTROL_SIZE bytes.
 *
 * On SMP configurations, each per-CPU control is padded to a power of two so
 * that the controls of different processors end up on different cache lines
 * (prevents false sharing) and so that assembler code can compute a
 * processor's control address with a simple shift, see
 * PER_CPU_CONTROL_SIZE_LOG2.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief Envelope for the per-CPU control (no padding on uniprocessor
 *   configurations).
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
654
655/**
656 *  @brief Set of Per CPU Core Information
657 *
658 *  This is an array of per CPU core information.
659 */
660extern CPU_STRUCTURE_ALIGNMENT Per_CPU_Control_envelope _Per_CPU_Information[];
661
662#define _Per_CPU_Acquire( cpu, lock_context ) \
663  _ISR_lock_Acquire( &( cpu )->Lock, lock_context )
664
665#define _Per_CPU_Release( cpu, lock_context ) \
666  _ISR_lock_Release( &( cpu )->Lock, lock_context )
667
668/*
669 * If we get the current processor index in a context which allows thread
670 * dispatching, then we may already run on another processor right after the
671 * read instruction.  There are very few cases in which this makes sense (here
672 * we can use _Per_CPU_Get_snapshot()).  All other places must use
673 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
674 */
675#if defined( _CPU_Get_current_per_CPU_control )
676  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
677#else
678  #define _Per_CPU_Get_snapshot() \
679    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
680#endif
681
#if defined( RTEMS_SMP )
/**
 * @brief Gets the per-CPU control of the processor executing this function.
 *
 * The snapshot is only stable if thread dispatching is disabled or an
 * interrupt is being serviced; the assertion checks this in RTEMS_DEBUG
 * configurations, see also the comment above _Per_CPU_Get_snapshot().
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *snapshot;

  snapshot = _Per_CPU_Get_snapshot();
  _Assert(
    snapshot->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return snapshot;
}
#else
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
696
697static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
698{
699  return &_Per_CPU_Information[ index ].per_cpu;
700}
701
702static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
703{
704#if defined(RTEMS_SMP)
705  const Per_CPU_Control_envelope *per_cpu_envelope =
706    ( const Per_CPU_Control_envelope * ) cpu;
707
708  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
709#else
710  (void) cpu;
711  return 0;
712#endif
713}
714
/**
 * @brief Gets the thread registered as executing on the processor.
 *
 * The member is read without synchronization; see Per_CPU_Control::executing
 * for the access rules.
 *
 * @param cpu is the per-CPU control.
 */
static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}
721
/**
 * @brief Indicates if the processor is currently servicing an interrupt.
 *
 * Uses the CPU port provided _ISR_Is_in_progress() if available, otherwise
 * falls back to the interrupt nest level of the per-CPU control.
 *
 * @param cpu is the per-CPU control.
 */
static inline bool _Per_CPU_Is_ISR_in_progress( const Per_CPU_Control *cpu )
{
#if CPU_PROVIDES_ISR_IS_IN_PROGRESS == TRUE
  (void) cpu;
  return _ISR_Is_in_progress();
#else
  return cpu->isr_nest_level != 0;
#endif
}
731
732static inline bool _Per_CPU_Is_processor_online(
733  const Per_CPU_Control *cpu
734)
735{
736#if defined( RTEMS_SMP )
737  return cpu->online;
738#else
739  (void) cpu;
740
741  return true;
742#endif
743}
744
745static inline bool _Per_CPU_Is_boot_processor(
746  const Per_CPU_Control *cpu
747)
748{
749#if defined( RTEMS_SMP )
750  return cpu->boot;
751#else
752  (void) cpu;
753
754  return true;
755#endif
756}
757
/**
 * @brief Disables interrupts and acquires the per-CPU locks of all
 *   processors in ascending processor index order.
 *
 * The lock of processor 0 is acquired with the caller's lock context; the
 * lock of each further processor is acquired with the Lock_context member of
 * the previously locked per-CPU control.  The fixed ascending order avoids
 * lock-order conflicts between concurrent callers.  Undo with
 * _Per_CPU_Release_all() using the same lock context.
 *
 * @param lock_context is the lock context for the overall operation.
 */
static inline void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
     Per_CPU_Control *cpu;

     cpu = _Per_CPU_Get_by_index( cpu_index );
     /* Chain each acquire through the previous processor's lock context */
     _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
     previous_cpu = cpu;
  }
#else
  _ISR_lock_ISR_disable( lock_context );
#endif
}
784
/**
 * @brief Releases the per-CPU locks of all processors in descending
 *   processor index order and enables interrupts.
 *
 * This is the inverse of _Per_CPU_Acquire_all(): each lock is released with
 * the lock context it was acquired with (the Lock_context member of the
 * preceding per-CPU control, and finally the caller's lock context for
 * processor 0).
 *
 * @param lock_context is the lock context used for _Per_CPU_Acquire_all().
 */
static inline void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
     Per_CPU_Control *previous_cpu;

     previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
     _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
     cpu = previous_cpu;
  }

  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  _ISR_lock_ISR_enable( lock_context );
#endif
}
811
812#if defined( RTEMS_SMP )
813
/**
 * @brief Gets the current processor state.
 *
 * The load uses acquire ordering and pairs with the releasing store in
 * _Per_CPU_Set_state().
 *
 * @param cpu is the processor control.
 *
 * @return Returns the current state of the processor.
 */
static inline Per_CPU_State _Per_CPU_Get_state( const Per_CPU_Control *cpu )
{
  return (Per_CPU_State)
    _Atomic_Load_uint( &cpu->state, ATOMIC_ORDER_ACQUIRE );
}
826
/**
 * @brief Sets the processor state of the current processor.
 *
 * The store uses release ordering and pairs with the acquiring load in
 * _Per_CPU_Get_state().
 *
 * @param cpu_self is the processor control of the processor executing this
 *   function.
 *
 * @param state is the new processor state.
 */
static inline void _Per_CPU_Set_state(
  Per_CPU_Control *cpu_self,
  Per_CPU_State    state
)
{
  /* Only the processor owning the control may change its state */
  _Assert( cpu_self == _Per_CPU_Get() );
  _Atomic_Store_uint(
    &cpu_self->state,
    (unsigned int) state,
    ATOMIC_ORDER_RELEASE
  );
}
847
848/**
849 * @brief Waits for a processor to change into a non-initial state.
850 *
851 * This function should be called only in _CPU_SMP_Start_processor() if
852 * required by the CPU port or BSP.
853 *
854 * @code
855 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
856 * {
857 *   uint32_t timeout = 123456;
858 *
859 *   start_the_processor(cpu_index);
860 *
861 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
862 * }
863 * @endcode
864 *
865 * @param[in] cpu_index The processor index.
866 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
867 * wait forever if necessary.
868 *
869 * @retval true The processor is in a non-initial state.
870 * @retval false The timeout expired before the processor reached a non-initial
871 * state.
872 */
873bool _Per_CPU_State_wait_for_non_initial_state(
874  uint32_t cpu_index,
875  uint32_t timeout_in_ns
876);
877
878/**
879 * @brief Performs the jobs of the specified processor in FIFO order.
880 *
881 * @param[in, out] cpu The jobs of this processor will be performed.
882 */
883void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
884
885/**
886 * @brief Adds the job to the tail of the processing list of the processor.
887 *
888 * This function does not send the ::SMP_MESSAGE_PERFORM_JOBS message to the
889 * processor, see also _Per_CPU_Submit_job().
890 *
891 * @param[in, out] cpu The processor to add the job.
892 * @param[in, out] job The job.  The Per_CPU_Job::context member must be
893 *   initialized by the caller.
894 */
895void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
896
897/**
898 * @brief Adds the job to the tail of the processing list of the processor and
899 *   notifies the processor to process the job.
900 *
901 * This function sends the ::SMP_MESSAGE_PERFORM_JOBS message to the processor
902 * if it is in the ::PER_CPU_STATE_UP state, see also _Per_CPU_Add_job().
903 *
904 * @param[in, out] cpu The processor to add the job.
905 * @param[in, out] job The job.  The Per_CPU_Job::context member must be
906 *   initialized by the caller.
907 */
908void _Per_CPU_Submit_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
909
910/**
911 * @brief Waits for the job carried out by the specified processor.
912 *
913 * This function may result in an SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS
914 * fatal error.
915 *
916 * @param[in] cpu The processor carrying out the job.
917 * @param[in] job The job to wait for.
918 */
919void _Per_CPU_Wait_for_job(
920  const Per_CPU_Control *cpu,
921  const Per_CPU_Job     *job
922);
923
924#endif /* defined( RTEMS_SMP ) */
925
926/*
927 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
928 * Thus when built for non-SMP, there should be no performance penalty.
929 */
930#define _Thread_Dispatch_disable_level \
931  _Per_CPU_Get()->thread_dispatch_disable_level
932#define _Thread_Heir \
933  _Per_CPU_Get()->heir
934
935#if defined(_CPU_Get_thread_executing)
936#define _Thread_Executing \
937  _CPU_Get_thread_executing()
938#else
939#define _Thread_Executing \
940  _Per_CPU_Get_executing( _Per_CPU_Get() )
941#endif
942
943#define _ISR_Nest_level \
944  _Per_CPU_Get()->isr_nest_level
945#define _CPU_Interrupt_stack_low \
946  _Per_CPU_Get()->interrupt_stack_low
947#define _CPU_Interrupt_stack_high \
948  _Per_CPU_Get()->interrupt_stack_high
949#define _Thread_Dispatch_necessary \
950  _Per_CPU_Get()->dispatch_necessary
951
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
static inline struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    /*
     * Disable interrupts so that the thread cannot migrate between reading
     * the processor index and reading the executing member.
     */
    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
980
981/**@}*/
982
983#endif /* !defined( ASM ) */
984
985#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )
986
987#define PER_CPU_INTERRUPT_STACK_LOW \
988  CPU_PER_CPU_CONTROL_SIZE
989#define PER_CPU_INTERRUPT_STACK_HIGH \
990  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
991
992#define INTERRUPT_STACK_LOW \
993  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
994#define INTERRUPT_STACK_HIGH \
995  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
996
997/*
998 *  These are the offsets of the required elements in the per CPU table.
999 */
1000#define PER_CPU_ISR_NEST_LEVEL \
1001  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
1002#define PER_CPU_ISR_DISPATCH_DISABLE \
1003  PER_CPU_ISR_NEST_LEVEL + 4
1004#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
1005  PER_CPU_ISR_DISPATCH_DISABLE + 4
1006#define PER_CPU_DISPATCH_NEEDED \
1007  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
1008#define PER_CPU_OFFSET_EXECUTING \
1009  PER_CPU_DISPATCH_NEEDED + 4
1010#define PER_CPU_OFFSET_HEIR \
1011  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
1012#if defined(RTEMS_SMP)
1013#define PER_CPU_INTERRUPT_FRAME_AREA \
1014  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
1015#endif
1016
1017#define THREAD_DISPATCH_DISABLE_LEVEL \
1018  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
1019#define ISR_NEST_LEVEL \
1020  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
1021#define DISPATCH_NEEDED \
1022  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
1023
1024#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
1025
1026#ifdef __cplusplus
1027}
1028#endif
1029
1030#endif
1031/* end of include file */
Note: See TracBrowser for help on using the repository browser.