/**
 * @file
 *
 * @brief Allocate and Initialize Per CPU Structures
 * @ingroup PerCPU
 */

/*
 * COPYRIGHT (c) 1989-2011.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
16 | |
---|
17 | #if HAVE_CONFIG_H |
---|
18 | #include "config.h" |
---|
19 | #endif |
---|
20 | |
---|
21 | #include <rtems/score/percpu.h> |
---|
22 | #include <rtems/score/assert.h> |
---|
23 | #include <rtems/score/isrlock.h> |
---|
24 | #include <rtems/score/smpimpl.h> |
---|
25 | #include <rtems/config.h> |
---|
26 | |
---|
27 | RTEMS_STATIC_ASSERT( |
---|
28 | sizeof( CPU_Uint32ptr ) >= sizeof( uintptr_t ), |
---|
29 | CPU_Uint32ptr_greater_equal_uintptr_t |
---|
30 | ); |
---|
31 | |
---|
32 | RTEMS_STATIC_ASSERT( |
---|
33 | sizeof( CPU_Uint32ptr ) >= sizeof( uint32_t ), |
---|
34 | CPU_Uint32ptr_greater_equal_uint32_t |
---|
35 | ); |
---|
36 | |
---|
37 | #if defined(RTEMS_SMP) |
---|
38 | |
---|
39 | typedef struct { |
---|
40 | SMP_Action_handler handler; |
---|
41 | void *arg; |
---|
42 | } SMP_Before_multicast_action; |
---|
43 | |
---|
44 | ISR_LOCK_DEFINE( static, _Per_CPU_State_lock, "Per-CPU State" ) |
---|
45 | |
---|
46 | static void _Per_CPU_State_acquire( ISR_lock_Context *lock_context ) |
---|
47 | { |
---|
48 | _ISR_lock_ISR_disable_and_acquire( &_Per_CPU_State_lock, lock_context ); |
---|
49 | } |
---|
50 | |
---|
51 | static void _Per_CPU_State_release( ISR_lock_Context *lock_context ) |
---|
52 | { |
---|
53 | _ISR_lock_Release_and_ISR_enable( &_Per_CPU_State_lock, lock_context ); |
---|
54 | } |
---|
55 | |
---|
56 | static void _Per_CPU_State_before_multitasking_action( Per_CPU_Control *cpu ) |
---|
57 | { |
---|
58 | uintptr_t action_value; |
---|
59 | |
---|
60 | action_value = _Atomic_Load_uintptr( |
---|
61 | &cpu->before_multitasking_action, |
---|
62 | ATOMIC_ORDER_ACQUIRE |
---|
63 | ); |
---|
64 | |
---|
65 | if ( action_value != 0 ) { |
---|
66 | SMP_Before_multicast_action *action = |
---|
67 | (SMP_Before_multicast_action *) action_value; |
---|
68 | |
---|
69 | ( *action->handler )( action->arg ); |
---|
70 | |
---|
71 | _Atomic_Store_uintptr( |
---|
72 | &cpu->before_multitasking_action, |
---|
73 | 0, |
---|
74 | ATOMIC_ORDER_RELEASE |
---|
75 | ); |
---|
76 | } |
---|
77 | } |
---|
78 | |
---|
79 | static void _Per_CPU_State_busy_wait( |
---|
80 | Per_CPU_Control *cpu, |
---|
81 | Per_CPU_State new_state |
---|
82 | ) |
---|
83 | { |
---|
84 | Per_CPU_State state = cpu->state; |
---|
85 | |
---|
86 | switch ( new_state ) { |
---|
87 | case PER_CPU_STATE_REQUEST_START_MULTITASKING: |
---|
88 | while ( |
---|
89 | state != PER_CPU_STATE_READY_TO_START_MULTITASKING |
---|
90 | && state != PER_CPU_STATE_SHUTDOWN |
---|
91 | ) { |
---|
92 | _Per_CPU_Perform_jobs( cpu ); |
---|
93 | _CPU_SMP_Processor_event_receive(); |
---|
94 | state = cpu->state; |
---|
95 | } |
---|
96 | break; |
---|
97 | case PER_CPU_STATE_UP: |
---|
98 | while ( |
---|
99 | state != PER_CPU_STATE_REQUEST_START_MULTITASKING |
---|
100 | && state != PER_CPU_STATE_SHUTDOWN |
---|
101 | ) { |
---|
102 | _Per_CPU_State_before_multitasking_action( cpu ); |
---|
103 | _Per_CPU_Perform_jobs( cpu ); |
---|
104 | _CPU_SMP_Processor_event_receive(); |
---|
105 | state = cpu->state; |
---|
106 | } |
---|
107 | break; |
---|
108 | default: |
---|
109 | /* No need to wait */ |
---|
110 | break; |
---|
111 | } |
---|
112 | } |
---|
113 | |
---|
114 | static Per_CPU_State _Per_CPU_State_get_next( |
---|
115 | Per_CPU_State current_state, |
---|
116 | Per_CPU_State new_state |
---|
117 | ) |
---|
118 | { |
---|
119 | switch ( current_state ) { |
---|
120 | case PER_CPU_STATE_INITIAL: |
---|
121 | switch ( new_state ) { |
---|
122 | case PER_CPU_STATE_READY_TO_START_MULTITASKING: |
---|
123 | case PER_CPU_STATE_SHUTDOWN: |
---|
124 | /* Change is acceptable */ |
---|
125 | break; |
---|
126 | default: |
---|
127 | new_state = PER_CPU_STATE_SHUTDOWN; |
---|
128 | break; |
---|
129 | } |
---|
130 | break; |
---|
131 | case PER_CPU_STATE_READY_TO_START_MULTITASKING: |
---|
132 | switch ( new_state ) { |
---|
133 | case PER_CPU_STATE_REQUEST_START_MULTITASKING: |
---|
134 | case PER_CPU_STATE_SHUTDOWN: |
---|
135 | /* Change is acceptable */ |
---|
136 | break; |
---|
137 | default: |
---|
138 | new_state = PER_CPU_STATE_SHUTDOWN; |
---|
139 | break; |
---|
140 | } |
---|
141 | break; |
---|
142 | case PER_CPU_STATE_REQUEST_START_MULTITASKING: |
---|
143 | switch ( new_state ) { |
---|
144 | case PER_CPU_STATE_UP: |
---|
145 | case PER_CPU_STATE_SHUTDOWN: |
---|
146 | /* Change is acceptable */ |
---|
147 | break; |
---|
148 | default: |
---|
149 | new_state = PER_CPU_STATE_SHUTDOWN; |
---|
150 | break; |
---|
151 | } |
---|
152 | break; |
---|
153 | default: |
---|
154 | new_state = PER_CPU_STATE_SHUTDOWN; |
---|
155 | break; |
---|
156 | } |
---|
157 | |
---|
158 | return new_state; |
---|
159 | } |
---|
160 | |
---|
161 | void _Per_CPU_State_change( |
---|
162 | Per_CPU_Control *cpu, |
---|
163 | Per_CPU_State new_state |
---|
164 | ) |
---|
165 | { |
---|
166 | ISR_lock_Context lock_context; |
---|
167 | Per_CPU_State next_state; |
---|
168 | |
---|
169 | _Per_CPU_State_busy_wait( cpu, new_state ); |
---|
170 | |
---|
171 | _Per_CPU_State_acquire( &lock_context ); |
---|
172 | |
---|
173 | next_state = _Per_CPU_State_get_next( cpu->state, new_state ); |
---|
174 | cpu->state = next_state; |
---|
175 | |
---|
176 | if ( next_state == PER_CPU_STATE_SHUTDOWN ) { |
---|
177 | uint32_t cpu_max = rtems_configuration_get_maximum_processors(); |
---|
178 | uint32_t cpu_index; |
---|
179 | |
---|
180 | for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { |
---|
181 | Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index( cpu_index ); |
---|
182 | |
---|
183 | if ( cpu_other != cpu ) { |
---|
184 | switch ( cpu_other->state ) { |
---|
185 | case PER_CPU_STATE_UP: |
---|
186 | _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN ); |
---|
187 | break; |
---|
188 | default: |
---|
189 | /* Nothing to do */ |
---|
190 | break; |
---|
191 | } |
---|
192 | |
---|
193 | cpu_other->state = PER_CPU_STATE_SHUTDOWN; |
---|
194 | } |
---|
195 | } |
---|
196 | } |
---|
197 | |
---|
198 | _CPU_SMP_Processor_event_broadcast(); |
---|
199 | |
---|
200 | _Per_CPU_State_release( &lock_context ); |
---|
201 | |
---|
202 | if ( |
---|
203 | next_state == PER_CPU_STATE_SHUTDOWN |
---|
204 | && new_state != PER_CPU_STATE_SHUTDOWN |
---|
205 | ) { |
---|
206 | _SMP_Fatal( SMP_FATAL_SHUTDOWN ); |
---|
207 | } |
---|
208 | } |
---|
209 | |
---|
210 | bool _SMP_Before_multitasking_action( |
---|
211 | Per_CPU_Control *cpu, |
---|
212 | SMP_Action_handler handler, |
---|
213 | void *arg |
---|
214 | ) |
---|
215 | { |
---|
216 | bool done; |
---|
217 | |
---|
218 | _Assert( _Per_CPU_Is_boot_processor( _Per_CPU_Get() ) ); |
---|
219 | |
---|
220 | if ( _Per_CPU_Is_processor_online( cpu ) ) { |
---|
221 | SMP_Before_multicast_action action = { |
---|
222 | .handler = handler, |
---|
223 | .arg = arg |
---|
224 | }; |
---|
225 | Per_CPU_State expected_state = PER_CPU_STATE_READY_TO_START_MULTITASKING; |
---|
226 | |
---|
227 | _Atomic_Store_uintptr( |
---|
228 | &cpu->before_multitasking_action, |
---|
229 | (uintptr_t) &action, |
---|
230 | ATOMIC_ORDER_RELEASE |
---|
231 | ); |
---|
232 | |
---|
233 | _CPU_SMP_Processor_event_broadcast(); |
---|
234 | |
---|
235 | _Per_CPU_State_busy_wait( cpu, expected_state ); |
---|
236 | |
---|
237 | do { |
---|
238 | done = _Atomic_Load_uintptr( |
---|
239 | &cpu->before_multitasking_action, |
---|
240 | ATOMIC_ORDER_ACQUIRE |
---|
241 | ) == 0; |
---|
242 | } while ( !done && cpu->state == expected_state ); |
---|
243 | } else { |
---|
244 | done = false; |
---|
245 | } |
---|
246 | |
---|
247 | return done; |
---|
248 | } |
---|
249 | |
---|
250 | #else |
---|
251 | /* |
---|
252 | * On single core systems, we can efficiently directly access a single |
---|
253 | * statically allocated per cpu structure. And the fields are initialized |
---|
254 | * as individual elements just like it has always been done. |
---|
255 | */ |
---|
256 | Per_CPU_Control_envelope _Per_CPU_Information[1]; |
---|
257 | #endif |
---|