source: rtems/cpukit/score/src/smp.c @ 38b59a6

Last change on this file was 38b59a6, checked in by Sebastian Huber <sebastian.huber@…> on 05/02/14 at 08:31:09

score: Implement forced thread migration

The current implementation of task migration in RTEMS has implications
for the interrupt latency. It is crucial to preserve the system
invariant that a task can execute on at most one processor in the
system at a time. This is accomplished with a boolean indicator in the
task context. The processor architecture specific low-level task
context switch code marks a task context as no longer executing and
waits until the heir context has stopped execution before it restores
the heir context and resumes execution of the heir task. So there is
one point in time at which a processor runs without a task. This is
essential to avoid cyclic dependencies in case multiple tasks migrate
at once; otherwise some supervising entity would be necessary to
prevent livelocks. Such a global supervisor would lead to scalability
problems, so this approach is not used. Currently, thread dispatch is
performed with interrupts disabled, so if the heir task is still
executing on another processor, the time with interrupts disabled is
prolonged, since one processor has to wait for another processor to
make progress.

It is difficult to avoid this interrupt latency issue, since interrupts
normally store the context of the interrupted task on its stack. Once a
task is marked as not executing, we must not use its task stack to
store such an interrupt context. We also cannot use the heir stack
before the heir has stopped execution on another processor. So if we
enable interrupts during this transition, we have to provide an
alternative, task-independent stack for this time frame. This issue
needs further investigation.

/**
 *  @file
 *
 *  @brief SMP Support
 *  @ingroup Score
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/smpimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/config.h>

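/*
 * Start all processors assigned to a scheduler, except for the executing
 * processor, remember which processors actually started, and account each
 * started processor in its scheduler context.
 */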
static void _SMP_Start_processors( uint32_t cpu_count )
{
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  uint32_t cpu_index;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    const Scheduler_Assignment *assignment =
      _Scheduler_Get_assignment( cpu_index );
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
    bool started;

    if ( cpu_index != cpu_index_self ) {
      if ( _Scheduler_Should_start_processor( assignment ) ) {
        started = _CPU_SMP_Start_processor( cpu_index );

        if ( !started && _Scheduler_Is_mandatory_processor( assignment ) ) {
          _SMP_Fatal( SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED );
        }
      } else {
        started = false;
      }
    } else {
      started = true;

      if ( !_Scheduler_Should_start_processor( assignment ) ) {
        _SMP_Fatal( SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER );
      }
    }

    cpu->started = started;

    if ( started ) {
      Scheduler_Context *scheduler_context = assignment->scheduler->context;

      ++scheduler_context->processor_count;
      cpu->scheduler_context = scheduler_context;
    }
  }
}

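/*
 * Initialize the per-CPU ticket locks, perform the CPU port specific SMP
 * initialization, ensure that all mandatory processors are actually present,
 * and start the processors assigned to a scheduler.
 */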
void _SMP_Handler_initialize( void )
{
  uint32_t cpu_max = rtems_configuration_get_maximum_processors();
  uint32_t cpu_count;
  uint32_t cpu_index;

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    _SMP_ticket_lock_Initialize( &cpu->Lock, "per-CPU" );
  }

  /*
   * Discover and initialize the secondary cores in an SMP system.
   */
  cpu_count = _CPU_SMP_Initialize();
  cpu_count = cpu_count < cpu_max ? cpu_count : cpu_max;
  _SMP_Processor_count = cpu_count;

  for ( cpu_index = cpu_count ; cpu_index < cpu_max ; ++cpu_index ) {
    const Scheduler_Assignment *assignment =
      _Scheduler_Get_assignment( cpu_index );

    if ( _Scheduler_Is_mandatory_processor( assignment ) ) {
      _SMP_Fatal( SMP_FATAL_MANDATORY_PROCESSOR_NOT_PRESENT );
    }
  }

  _SMP_Start_processors( cpu_count );

  _CPU_SMP_Finalize_initialization( cpu_count );
}

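/*
 * Executed by the boot processor: announce that it is ready to start
 * multitasking and request all processors to start multitasking.
 */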
void _SMP_Request_start_multitasking( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
  }
}

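/*
 * Entry point for a secondary processor after its low-level start: a fatal
 * error occurs if this processor was not started (it is not assigned to a
 * scheduler), otherwise it synchronizes with the boot processor and starts
 * multitasking.
 */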
void _SMP_Start_multitasking_on_secondary_processor( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();

  if ( !_Per_CPU_Is_processor_started( self_cpu ) ) {
    _SMP_Fatal( SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR );
  }

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  _Thread_Start_multitasking();
}

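/*
 * Request a shutdown: move the executing processor into the shutdown state
 * and release the Giant lock, see the comment below.
 */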
void _SMP_Request_shutdown( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_SHUTDOWN );

  /*
   * We have to drop the Giant lock here in order to give other processors
   * the opportunity to receive the inter-processor interrupts issued
   * previously.  In case the executing thread still holds SMP locks, other
   * processors already waiting for these locks would spin forever.
   */
  _Giant_Drop( self_cpu );
}

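/*
 * Set the message bits of the target processor atomically and issue an
 * inter-processor interrupt so that it notices them.
 */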
void _SMP_Send_message( uint32_t cpu_index, unsigned long message )
{
  Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

  _Atomic_Fetch_or_ulong( &cpu->message, message, ATOMIC_ORDER_RELAXED );

  _CPU_SMP_Send_interrupt( cpu_index );
}

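/*
 * Send the message to all processors except the executing processor.
 */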
void _SMP_Broadcast_message( uint32_t message )
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  uint32_t cpu_index;

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    if ( cpu_index != cpu_index_self ) {
      _SMP_Send_message( cpu_index, message );
    }
  }
}

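/*
 * Handler called when a processor receives a test message; set by test code.
 */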
SMP_Test_message_handler _SMP_Test_message_handler;