source: rtems/cpukit/include/rtems/score/percpu.h @ 85d6e845

5
Last change on this file since 85d6e845 was 85d6e845, checked in by Sebastian Huber <sebastian.huber@…>, on 04/19/19 at 09:01:31

score: Add _Per_CPU_Add_job()

  • Property mode set to 100644
File size: 25.0 KB
Line 
1/**
2 *  @file
3 *
4 *  This include file defines the per CPU information required
5 *  by RTEMS.
6 */
7
8/*
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  Copyright (c) 2012, 2018 embedded brains GmbH
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_PERCPU_H
20#define _RTEMS_PERCPU_H
21
22#include <rtems/score/cpuimpl.h>
23
24#if defined( ASM )
25  #include <rtems/asm.h>
26#else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/timestamp.h>
32  #include <rtems/score/watchdog.h>
33#endif
34
35#ifdef __cplusplus
36extern "C" {
37#endif
38
#if defined(RTEMS_SMP)
  /*
   * Estimate of sizeof( Per_CPU_Control ) used below to select a power of
   * two for PER_CPU_CONTROL_SIZE.  If the estimate is too small, the padding
   * array in Per_CPU_Control_envelope gets a negative size and the build
   * fails, so an undersized estimate cannot go unnoticed.
   */
  #if defined(RTEMS_PROFILING)
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 512 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 256 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #else
    #define PER_CPU_CONTROL_SIZE_APPROX \
      ( 180 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
  #endif

  /*
   * This ensures that on SMP configurations the individual per-CPU controls
   * are on different cache lines to prevent false sharing.  This define can be
   * used in assembler code to easily get the per-CPU control for a particular
   * processor.
   */
  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
    #define PER_CPU_CONTROL_SIZE_LOG2 11
  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
    #define PER_CPU_CONTROL_SIZE_LOG2 10
  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
    #define PER_CPU_CONTROL_SIZE_LOG2 9
  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
    #define PER_CPU_CONTROL_SIZE_LOG2 8
  #else
    #define PER_CPU_CONTROL_SIZE_LOG2 7
  #endif

  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
71
72#if !defined( ASM )
73
74struct Record_Control;
75
76struct _Thread_Control;
77
78struct Scheduler_Context;
79
80/**
81 *  @defgroup PerCPU RTEMS Per CPU Information
82 *
83 *  @ingroup RTEMSScore
84 *
85 *  This defines the per CPU state information required by RTEMS
86 *  and the BSP.  In an SMP configuration, there will be multiple
87 *  instances of this data structure -- one per CPU -- and the
88 *  current CPU number will be used as the index.
89 */
90
91/**@{*/
92
93#if defined( RTEMS_SMP )
94
/**
 * @brief State of a processor.
 *
 * The processor state controls the life cycle of processors at the lowest
 * level.  No multi-threading or other high-level concepts matter here.
 *
 * State changes must be initiated via _Per_CPU_State_change().  This function
 * may not return in case someone requested a shutdown.  The
 * _SMP_Send_message() function will be used to notify other processors about
 * state changes if the other processor is in the up state.
 *
 * Due to the sequential nature of the basic system initialization one
 * processor has a special role.  It is the processor executing the boot_card()
 * function.  This processor is called the boot processor.  All other
 * processors are called secondary.
 *
 * @dot
 * digraph states {
 *   i [label="PER_CPU_STATE_INITIAL"];
 *   rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"];
 *   reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"];
 *   u [label="PER_CPU_STATE_UP"];
 *   s [label="PER_CPU_STATE_SHUTDOWN"];
 *   i -> rdy [label="processor\ncompleted initialization"];
 *   rdy -> reqsm [label="boot processor\ncompleted initialization"];
 *   reqsm -> u [label="processor\nstarts multitasking"];
 *   i -> s;
 *   rdy -> s;
 *   reqsm -> s;
 *   u -> s;
 * }
 * @enddot
 */
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;
175
/**
 * @brief Handler for a per-processor job.
 *
 * @param arg The handler argument, taken from Per_CPU_Job_context::arg.
 */
typedef void ( *Per_CPU_Job_handler )( void *arg );

/**
 * @brief Context for per-processor jobs.
 *
 * This is separate from Per_CPU_Job to save stack memory in
 * _SMP_Multicast_action().
 */
typedef struct {
  /**
   * @brief The job handler.
   */
  Per_CPU_Job_handler handler;

  /**
   * @brief The job handler argument.
   */
  void *arg;
} Per_CPU_Job_context;
195
/*
 * Value for the Per_CPU_Job::done member to indicate that a job is done
 * (handler was called on the target processor).  Must not be a valid pointer
 * value since it overlaps with the Per_CPU_Job::next member.
 */
#define PER_CPU_JOB_DONE 1

/**
 * @brief A per-processor job.
 *
 * This structure must be as small as possible due to stack space constraints
 * in _SMP_Multicast_action().
 */
typedef struct Per_CPU_Job {
  /* Anonymous union: the next link and the done flag share storage, which
   * is why PER_CPU_JOB_DONE must not collide with a valid pointer value. */
  union {
    /**
     * @brief The next job in the corresponding per-processor job list.
     */
    struct Per_CPU_Job *next;

    /**
     * @brief Indication if the job is done.
     *
     * A job is done if this member has the value PER_CPU_JOB_DONE.  This
     * assumes that PER_CPU_JOB_DONE is not a valid pointer value.
     */
    Atomic_Ulong done;
  };

  /**
   * @brief Pointer to the job context to get the handler and argument.
   */
  const Per_CPU_Job_context *context;
} Per_CPU_Job;
230
231#endif /* defined( RTEMS_SMP ) */
232
/**
 * @brief Per-CPU statistics.
 *
 * All members are only present in RTEMS_PROFILING configurations; otherwise
 * the structure is empty.
 */
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
304
/**
 * @brief Per-CPU watchdog header index.
 *
 * Used to index Per_CPU_Control::Watchdog::Header.
 */
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
341
/**
 *  @brief Per CPU Core Structure
 *
 *  This structure is used to hold per core state information.
 *
 *  NOTE(review): the order and size of the leading members (up to and
 *  including Interrupt_frame) must stay in sync with the PER_CPU_* offset
 *  macros defined at the bottom of this file for assembler code.
 */
typedef struct Per_CPU_Control {
  #if CPU_PER_CPU_CONTROL_SIZE > 0
    /**
     * @brief CPU port specific control.
     */
    CPU_Per_CPU_control cpu_per_cpu;
  #endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   *  This contains the current interrupt nesting level on this
   *  CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This member is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on another processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This member is not protected by a lock.  The only writer is this
   * processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This member is not protected by a lock.  The only writer after
   * multitasking start is the scheduler owning this processor.  It is assumed
   * that stores to pointers are atomic on all supported SMP architectures.
   * The CPU port specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame area (SMP only).
   *
   * NOTE(review): located via the PER_CPU_INTERRUPT_FRAME_AREA offset macro;
   * presumably used by the CPU port interrupt handling -- confirm in the
   * respective CPU port.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

  #if defined( RTEMS_SMP )
    /**
     * @brief This lock protects some members of this structure.
     */
    ISR_lock_Control Lock;

    /**
     * @brief Lock context used to acquire all per-CPU locks.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     *
     * @see _Per_CPU_Acquire_all().
     */
    ISR_lock_Context Lock_context;

    /**
     * @brief Chain of threads in need for help.
     *
     * This member is protected by the Per_CPU_Control::Lock lock.
     */
    Chain_Control Threads_in_need_for_help;

    /**
     * @brief Bit field for SMP messages.
     *
     * This member is not protected locks.  Atomic operations are used to set
     * and get the message bits.
     */
    Atomic_Ulong message;

    struct {
      /**
       * @brief The scheduler control of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct _Scheduler_Control *control;

      /**
       * @brief The scheduler context of the scheduler owning this processor.
       *
       * This pointer is NULL in case this processor is currently not used by a
       * scheduler instance.
       */
      const struct Scheduler_Context *context;

      /**
       * @brief The idle thread for this processor in case it is online and
       * currently not used by a scheduler instance.
       */
      struct _Thread_Control *idle_if_online_and_unused;
    } Scheduler;

    /**
     * @brief Begin of the per-CPU data area.
     *
     * Contains items defined via PER_CPU_DATA_ITEM().
     */
    char *data;

    /**
     * @brief Indicates the current state of the CPU.
     *
     * This member is protected by the _Per_CPU_State_lock lock.
     *
     * @see _Per_CPU_State_change().
     */
    Per_CPU_State state;

    /**
     * @brief FIFO list of jobs to be performed by this processor.
     *
     * @see _SMP_Multicast_action().
     */
    struct {
      /**
       * @brief Lock to protect the FIFO list of jobs to be performed by this
       * processor.
       */
      ISR_lock_Control Lock;

      /**
       * @brief Head of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job *head;

      /**
       * @brief Tail of the FIFO list of jobs to be performed by this
       * processor.
       *
       * This member is only valid if the head is not @c NULL.
       *
       * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
       */
      struct Per_CPU_Job **tail;
    } Jobs;

    /**
     * @brief Indicates if the processor has been successfully started via
     * _CPU_SMP_Start_processor().
     */
    bool online;

    /**
     * @brief Indicates if the processor is the one that performed the initial
     * system initialization.
     */
    bool boot;
  #endif

  /**
   * @brief The record control for this processor.
   *
   * NOTE(review): struct Record_Control is only forward-declared here; its
   * semantics are defined by the record support outside this file.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics, populated in RTEMS_PROFILING configurations.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
595
#if defined( RTEMS_SMP )
/**
 * @brief Pads each Per_CPU_Control to exactly PER_CPU_CONTROL_SIZE bytes.
 *
 * The padding keeps the individual per-CPU controls on distinct cache lines
 * and makes the array stride a power of two for assembler indexing.  If
 * sizeof( Per_CPU_Control ) ever exceeds PER_CPU_CONTROL_SIZE, the padding
 * array gets a negative size and compilation fails, so this doubles as a
 * compile-time size check.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief On uniprocessor configurations no padding is necessary.
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
607
/**
 *  @brief Set of Per CPU Core Information
 *
 *  This is an array of per CPU core information.
 */
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

/**
 * @brief Acquires the Per_CPU_Control::Lock of the processor @a cpu.
 *
 * Callers such as _Per_CPU_Acquire_all() disable interrupts before the
 * acquire.
 */
#define _Per_CPU_Acquire( cpu, lock_context ) \
  _ISR_lock_Acquire( &( cpu )->Lock, lock_context )

/**
 * @brief Releases the Per_CPU_Control::Lock of the processor @a cpu.
 */
#define _Per_CPU_Release( cpu, lock_context ) \
  _ISR_lock_Release( &( cpu )->Lock, lock_context )
620
/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
  /* The CPU port provides a direct (e.g. register based) accessor. */
  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
  /* Fall back to indexing the per-CPU table by the current processor. */
  #define _Per_CPU_Get_snapshot() \
    ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
634
#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * The assertion (active in RTEMS_DEBUG configurations) checks that the
 * returned pointer cannot become stale: either thread dispatching is
 * disabled on this processor or the caller runs with a non-zero ISR level.
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
/* On uniprocessor configurations the snapshot can never become stale. */
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
649
650static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
651{
652  return &_Per_CPU_Information[ index ].per_cpu;
653}
654
655static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
656{
657  const Per_CPU_Control_envelope *per_cpu_envelope =
658    ( const Per_CPU_Control_envelope * ) cpu;
659
660  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
661}
662
663static inline struct _Thread_Control *_Per_CPU_Get_executing(
664  const Per_CPU_Control *cpu
665)
666{
667  return cpu->executing;
668}
669
/**
 * @brief Indicates if the processor was successfully started.
 *
 * On uniprocessor configurations the single processor is always online.
 *
 * @see Per_CPU_Control::online.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}
682
/**
 * @brief Indicates if the processor performed the initial system
 * initialization.
 *
 * On uniprocessor configurations the single processor is always the boot
 * processor.
 *
 * @see Per_CPU_Control::boot.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}
695
/**
 * @brief Disables interrupts and acquires the per-CPU locks of all
 * processors in ascending index order.
 *
 * The lock context of each acquired lock except the first is stored in the
 * Lock_context member of the previously acquired processor, see
 * Per_CPU_Control::Lock_context.  Release with _Per_CPU_Release_all(), which
 * walks the processors in the reverse (descending) order.
 *
 * @param[in, out] lock_context The lock context for the interrupt disable
 *   and the lock of processor 0.
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
     Per_CPU_Control *cpu;

     cpu = _Per_CPU_Get_by_index( cpu_index );
     /* Park this lock's context in the previously acquired processor. */
     _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
     previous_cpu = cpu;
  }
#else
  /* Uniprocessor: disabling interrupts is sufficient. */
  _ISR_lock_ISR_disable( lock_context );
#endif
}
722
/**
 * @brief Releases the per-CPU locks of all processors in descending index
 * order and enables interrupts.
 *
 * This is the inverse of _Per_CPU_Acquire_all(): each lock is released with
 * the lock context that was stored in the Lock_context member of the
 * preceding processor during the acquire.
 *
 * @param[in, out] lock_context The lock context that was passed to
 *   _Per_CPU_Acquire_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t         cpu_max;
  uint32_t         cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
     Per_CPU_Control *previous_cpu;

     previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
     /* The context for this lock was stored in the preceding processor. */
     _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
     cpu = previous_cpu;
  }

  /* Processor 0 was acquired with the caller supplied lock context. */
  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  _ISR_lock_ISR_enable( lock_context );
#endif
}
749
#if defined( RTEMS_SMP )

/**
 *  @brief Allocate and Initialize Per CPU Structures
 *
 *  This method allocates and initializes the per CPU structure.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the specified processor.
 *
 * This function may not return in case someone requested a shutdown, see
 * Per_CPU_State.
 *
 * @param[in, out] cpu The processor control.
 * @param[in] new_state The new processor state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 * wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 * state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

/**
 * @brief Performs the jobs of the specified processor in FIFO order.
 *
 * @param[in, out] cpu The jobs of this processor will be performed.
 */
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );

/**
 * @brief Adds the job to the tail of the processing list of the specified
 * processor.
 *
 * This function does not send the SMP_MESSAGE_PERFORM_JOBS message to the
 * specified processor.
 *
 * @param[in, out] cpu The processor to add the job.
 * @param[in, out] job The job.  The Per_CPU_Job::context member must be
 *   initialized by the caller.
 */
void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job );

#endif /* defined( RTEMS_SMP ) */
815
/*
 * Convenience accessors for members of the current processor's
 * Per_CPU_Control.
 *
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir

#if defined(_CPU_Get_thread_executing)
/* The CPU port provides a direct accessor for the executing thread. */
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
841
/**
 * @brief Returns the thread control block of the executing thread.
 *
 * This function can be called in any thread context.  On SMP configurations,
 * interrupts are disabled to ensure that the processor index is used
 * consistently if no CPU port specific method is available to get the
 * executing thread.
 *
 * @return The thread control block of the executing thread.
 */
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

  /* Without a CPU port accessor, the processor index read and the member
   * access must happen on the same processor, hence the interrupt disable. */
  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    ISR_Level level;

    _ISR_Local_disable( level );
  #endif

  executing = _Thread_Executing;

  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
    _ISR_Local_enable( level );
  #endif

  return executing;
}
870
871/**@}*/
872
873#endif /* !defined( ASM ) */
874
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of the leading Per_CPU_Control members, for use by assembler
 * code.  Keep these in sync with the member order and sizes at the top of
 * Per_CPU_Control.
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 *  These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Absolute addresses of the processor 0 members, for assembler code. */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
915
916#ifdef __cplusplus
917}
918#endif
919
920#endif
921/* end of include file */
Note: See TracBrowser for help on using the repository browser.