/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

23 | #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
24 | #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
25 | |
---|
#include <stdbool.h>
#include <stddef.h>

#include <rtems/score/schedulersmp.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/scheduler.h>
30 | |
---|
31 | #ifdef __cplusplus |
---|
32 | extern "C" { |
---|
33 | #endif /* __cplusplus */ |
---|
34 | |
---|
/**
 * @addtogroup ScoreSchedulerSMP
 *
 * @{
 */

41 | typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )( |
---|
42 | Scheduler_SMP_Control *self |
---|
43 | ); |
---|
44 | |
---|
45 | typedef void ( *Scheduler_SMP_Extract )( |
---|
46 | Scheduler_SMP_Control *self, |
---|
47 | Thread_Control *thread |
---|
48 | ); |
---|
49 | |
---|
50 | typedef void ( *Scheduler_SMP_Insert )( |
---|
51 | Scheduler_SMP_Control *self, |
---|
52 | Thread_Control *thread_to_insert |
---|
53 | ); |
---|
54 | |
---|
55 | typedef void ( *Scheduler_SMP_Move )( |
---|
56 | Scheduler_SMP_Control *self, |
---|
57 | Thread_Control *thread_to_move |
---|
58 | ); |
---|
59 | |
---|
60 | static inline void _Scheduler_SMP_Initialize( |
---|
61 | Scheduler_SMP_Control *self |
---|
62 | ) |
---|
63 | { |
---|
64 | _Chain_Initialize_empty( &self->Scheduled ); |
---|
65 | } |
---|
66 | |
---|
67 | static inline void _Scheduler_SMP_Allocate_processor( |
---|
68 | Thread_Control *scheduled, |
---|
69 | Thread_Control *victim |
---|
70 | ) |
---|
71 | { |
---|
72 | Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled ); |
---|
73 | Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim ); |
---|
74 | Thread_Control *heir; |
---|
75 | |
---|
76 | scheduled->is_scheduled = true; |
---|
77 | victim->is_scheduled = false; |
---|
78 | |
---|
79 | _Per_CPU_Acquire( cpu_of_scheduled ); |
---|
80 | |
---|
81 | if ( scheduled->is_executing ) { |
---|
82 | heir = cpu_of_scheduled->heir; |
---|
83 | cpu_of_scheduled->heir = scheduled; |
---|
84 | } else { |
---|
85 | heir = scheduled; |
---|
86 | } |
---|
87 | |
---|
88 | _Per_CPU_Release( cpu_of_scheduled ); |
---|
89 | |
---|
90 | if ( heir != victim ) { |
---|
91 | const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get(); |
---|
92 | |
---|
93 | _Thread_Set_CPU( heir, cpu_of_victim ); |
---|
94 | |
---|
95 | /* |
---|
96 | * FIXME: Here we need atomic store operations with a relaxed memory order. |
---|
97 | * The _CPU_SMP_Send_interrupt() will ensure that the change can be |
---|
98 | * observed consistently. |
---|
99 | */ |
---|
100 | cpu_of_victim->heir = heir; |
---|
101 | cpu_of_victim->dispatch_necessary = true; |
---|
102 | |
---|
103 | if ( cpu_of_victim != cpu_of_executing ) { |
---|
104 | _Per_CPU_Send_interrupt( cpu_of_victim ); |
---|
105 | } |
---|
106 | } |
---|
107 | } |
---|
108 | |
---|
109 | static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled( |
---|
110 | Scheduler_SMP_Control *self |
---|
111 | ) |
---|
112 | { |
---|
113 | Thread_Control *lowest_ready = NULL; |
---|
114 | Chain_Control *scheduled = &self->Scheduled; |
---|
115 | |
---|
116 | if ( !_Chain_Is_empty( scheduled ) ) { |
---|
117 | lowest_ready = (Thread_Control *) _Chain_Last( scheduled ); |
---|
118 | } |
---|
119 | |
---|
120 | return lowest_ready; |
---|
121 | } |
---|
122 | |
---|
123 | static inline void _Scheduler_SMP_Enqueue_ordered( |
---|
124 | Scheduler_SMP_Control *self, |
---|
125 | Thread_Control *thread, |
---|
126 | Chain_Node_order order, |
---|
127 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
128 | Scheduler_SMP_Insert insert_ready, |
---|
129 | Scheduler_SMP_Insert insert_scheduled, |
---|
130 | Scheduler_SMP_Move move_from_ready_to_scheduled, |
---|
131 | Scheduler_SMP_Move move_from_scheduled_to_ready |
---|
132 | ) |
---|
133 | { |
---|
134 | if ( thread->is_in_the_air ) { |
---|
135 | Thread_Control *highest_ready = ( *get_highest_ready )( self ); |
---|
136 | |
---|
137 | thread->is_in_the_air = false; |
---|
138 | |
---|
139 | /* |
---|
140 | * The thread has been extracted from the scheduled chain. We have to |
---|
141 | * place it now on the scheduled or ready chain. |
---|
142 | * |
---|
143 | * NOTE: Do not exchange parameters to do the negation of the order check. |
---|
144 | */ |
---|
145 | if ( |
---|
146 | highest_ready != NULL |
---|
147 | && !( *order )( &thread->Object.Node, &highest_ready->Object.Node ) |
---|
148 | ) { |
---|
149 | _Scheduler_SMP_Allocate_processor( highest_ready, thread ); |
---|
150 | |
---|
151 | ( *insert_ready )( self, thread ); |
---|
152 | ( *move_from_ready_to_scheduled )( self, highest_ready ); |
---|
153 | } else { |
---|
154 | thread->is_scheduled = true; |
---|
155 | |
---|
156 | ( *insert_scheduled )( self, thread ); |
---|
157 | } |
---|
158 | } else { |
---|
159 | Thread_Control *lowest_scheduled = _Scheduler_SMP_Get_lowest_scheduled( self ); |
---|
160 | |
---|
161 | /* |
---|
162 | * The scheduled chain is empty if nested interrupts change the priority of |
---|
163 | * all scheduled threads. These threads are in the air. |
---|
164 | */ |
---|
165 | if ( |
---|
166 | lowest_scheduled != NULL |
---|
167 | && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) |
---|
168 | ) { |
---|
169 | _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled ); |
---|
170 | |
---|
171 | ( *insert_scheduled )( self, thread ); |
---|
172 | ( *move_from_scheduled_to_ready )( self, lowest_scheduled ); |
---|
173 | } else { |
---|
174 | ( *insert_ready )( self, thread ); |
---|
175 | } |
---|
176 | } |
---|
177 | } |
---|
178 | |
---|
179 | static inline void _Scheduler_SMP_Schedule_highest_ready( |
---|
180 | Scheduler_SMP_Control *self, |
---|
181 | Thread_Control *victim, |
---|
182 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
183 | Scheduler_SMP_Move move_from_ready_to_scheduled |
---|
184 | ) |
---|
185 | { |
---|
186 | Thread_Control *highest_ready = ( *get_highest_ready )( self ); |
---|
187 | |
---|
188 | _Scheduler_SMP_Allocate_processor( highest_ready, victim ); |
---|
189 | |
---|
190 | ( *move_from_ready_to_scheduled )( self, highest_ready ); |
---|
191 | } |
---|
192 | |
---|
193 | static inline void _Scheduler_SMP_Block( |
---|
194 | Scheduler_SMP_Control *self, |
---|
195 | Thread_Control *thread, |
---|
196 | Scheduler_SMP_Extract extract, |
---|
197 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
198 | Scheduler_SMP_Move move_from_ready_to_scheduled |
---|
199 | ) |
---|
200 | { |
---|
201 | ( *extract )( self, thread ); |
---|
202 | |
---|
203 | if ( thread->is_in_the_air ) { |
---|
204 | thread->is_in_the_air = false; |
---|
205 | |
---|
206 | _Scheduler_SMP_Schedule_highest_ready( |
---|
207 | self, |
---|
208 | thread, |
---|
209 | get_highest_ready, |
---|
210 | move_from_ready_to_scheduled |
---|
211 | ); |
---|
212 | } |
---|
213 | } |
---|
214 | |
---|
215 | static inline void _Scheduler_SMP_Extract( |
---|
216 | Scheduler_SMP_Control *self, |
---|
217 | Thread_Control *thread, |
---|
218 | Scheduler_SMP_Extract extract |
---|
219 | ) |
---|
220 | { |
---|
221 | ( *extract )( self, thread ); |
---|
222 | } |
---|
223 | |
---|
224 | static inline void _Scheduler_SMP_Schedule( |
---|
225 | Scheduler_SMP_Control *self, |
---|
226 | Thread_Control *thread, |
---|
227 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
228 | Scheduler_SMP_Move move_from_ready_to_scheduled |
---|
229 | ) |
---|
230 | { |
---|
231 | if ( thread->is_in_the_air ) { |
---|
232 | thread->is_in_the_air = false; |
---|
233 | |
---|
234 | _Scheduler_SMP_Schedule_highest_ready( |
---|
235 | self, |
---|
236 | thread, |
---|
237 | get_highest_ready, |
---|
238 | move_from_ready_to_scheduled |
---|
239 | ); |
---|
240 | } |
---|
241 | } |
---|
242 | |
---|
243 | static inline void _Scheduler_SMP_Insert_scheduled_lifo( |
---|
244 | Scheduler_SMP_Control *self, |
---|
245 | Thread_Control *thread |
---|
246 | ) |
---|
247 | { |
---|
248 | _Chain_Insert_ordered_unprotected( |
---|
249 | &self->Scheduled, |
---|
250 | &thread->Object.Node, |
---|
251 | _Scheduler_simple_Insert_priority_lifo_order |
---|
252 | ); |
---|
253 | } |
---|
254 | |
---|
255 | static inline void _Scheduler_SMP_Insert_scheduled_fifo( |
---|
256 | Scheduler_SMP_Control *self, |
---|
257 | Thread_Control *thread |
---|
258 | ) |
---|
259 | { |
---|
260 | _Chain_Insert_ordered_unprotected( |
---|
261 | &self->Scheduled, |
---|
262 | &thread->Object.Node, |
---|
263 | _Scheduler_simple_Insert_priority_fifo_order |
---|
264 | ); |
---|
265 | } |
---|
266 | |
---|
267 | static inline void _Scheduler_SMP_Start_idle( |
---|
268 | Scheduler_SMP_Control *self, |
---|
269 | Thread_Control *thread, |
---|
270 | Per_CPU_Control *cpu |
---|
271 | ) |
---|
272 | { |
---|
273 | thread->is_scheduled = true; |
---|
274 | _Thread_Set_CPU( thread, cpu ); |
---|
275 | _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node ); |
---|
276 | } |
---|
277 | |
---|
/** @} */
279 | |
---|
280 | #ifdef __cplusplus |
---|
281 | } |
---|
282 | #endif /* __cplusplus */ |
---|
283 | |
---|
284 | #endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */ |
---|