source: rtems/cpukit/score/src/schedulerprioritysmp.c @ 4a0e418

Last change on this file since 4a0e418 was 4a0e418, checked in by Joel Sherrill <joel@…>, on 02/16/22 at 21:09:20

score/src/[n-s]*.c: Change license to BSD-2

Updates #3053.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreSchedulerPrioritySMP
 *
 * @brief This source file contains the implementation of
 *   _Scheduler_priority_SMP_Add_processor(),
 *   _Scheduler_priority_SMP_Ask_for_help(), _Scheduler_priority_SMP_Block(),
 *   _Scheduler_priority_SMP_Initialize(),
 *   _Scheduler_priority_SMP_Node_initialize(),
 *   _Scheduler_priority_SMP_Reconsider_help_request(),
 *   _Scheduler_priority_SMP_Remove_processor(),
 *   _Scheduler_priority_SMP_Unblock(),
 *   _Scheduler_priority_SMP_Update_priority(),
 *   _Scheduler_priority_SMP_Withdraw_node(),
 *   _Scheduler_priority_SMP_Make_sticky(),
 *   _Scheduler_priority_SMP_Clean_sticky(), and
 *   _Scheduler_priority_SMP_Yield().
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/schedulerprioritysmpimpl.h>

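/*
 * Downcasts the generic scheduler context to the priority SMP scheduler
 * context.
 */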
static Scheduler_priority_SMP_Context *
_Scheduler_priority_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_priority_SMP_Context *) _Scheduler_Get_context( scheduler );
}

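/*
 * Sets up the base SMP context, the priority bit map, and the table of ready
 * queues.  The ready queue for the maximum priority value is recorded as the
 * queue used by idle threads.
 */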
void _Scheduler_priority_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  self->idle_ready_queue = &self->Ready[ scheduler->maximum_priority ];
  _Priority_bit_map_Initialize( &self->Bit_map );
  _Scheduler_priority_Ready_queue_initialize(
    &self->Ready[ 0 ],
    scheduler->maximum_priority
  );
}

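/*
 * Initializes the base SMP node and binds the node to the ready queue which
 * corresponds to its unmapped priority value.
 */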
void _Scheduler_priority_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Context              *context;
  Scheduler_priority_SMP_Context *self;
  Scheduler_priority_SMP_Node    *the_node;

  the_node = _Scheduler_priority_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize(
    scheduler,
    &the_node->Base,
    the_thread,
    priority
  );

  context = _Scheduler_Get_context( scheduler );
  self = _Scheduler_priority_SMP_Get_self( context );
  _Scheduler_priority_Ready_queue_update(
    &the_node->Ready_queue,
    SCHEDULER_PRIORITY_UNMAP( priority ),
    &self->Bit_map,
    &self->Ready[ 0 ]
  );
}

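/*
 * Returns the first node of the highest priority non-empty ready queue as
 * indicated by the bit map.  The filter node is not used by this scheduler.
 */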
static Scheduler_Node *_Scheduler_priority_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_self( context );

  (void) node;

  return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
    &self->Bit_map,
    &self->Ready[ 0 ]
  );
}

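/*
 * The operations below delegate to the generic SMP scheduler framework and
 * pass in the priority scheduler specific ready queue operations as
 * callbacks.
 */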
void _Scheduler_priority_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle
  );
}

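/*
 * Enqueues the node into the ready or scheduled set depending on how its
 * insert priority compares to the lowest priority scheduled node.
 */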
static bool _Scheduler_priority_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

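/*
 * Enqueues a node which is already scheduled, for example after a priority
 * change; the highest priority ready node may take over its processor.
 */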
static void _Scheduler_priority_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

void _Scheduler_priority_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_SMP_Enqueue,
    _Scheduler_priority_SMP_Release_idle
  );
}

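/*
 * Performs the ask-for-help operation of the generic SMP framework with the
 * priority scheduler specific insert and move operations.
 */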
static bool _Scheduler_priority_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Release_idle
  );
}

void _Scheduler_priority_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_SMP_Enqueue,
    _Scheduler_priority_SMP_Enqueue_scheduled,
    _Scheduler_priority_SMP_Do_ask_for_help
  );
}

bool _Scheduler_priority_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_priority_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_priority_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}

void _Scheduler_priority_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle
  );
}

void _Scheduler_priority_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Make_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_SMP_Enqueue
  );
}

void _Scheduler_priority_SMP_Clean_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Clean_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

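/*
 * Adds the processor associated with the given idle thread to this scheduler
 * instance.
 */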
void _Scheduler_priority_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_priority_SMP_Has_ready,
    _Scheduler_priority_SMP_Enqueue_scheduled,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}

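/*
 * Removes the processor from this scheduler instance and returns the idle
 * thread which was used for it.
 */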
Thread_Control *_Scheduler_priority_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Enqueue,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}

void _Scheduler_priority_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Enqueue,
    _Scheduler_priority_SMP_Enqueue_scheduled
  );
}