source: rtems/cpukit/rtems/src/schedulerremoveprocessor.c @ b3b6d21e

Last change on this file was b3b6d21e, checked in by Joel Sherrill <joel@…> on 02/16/22 at 22:28:59

cpukit/rtems/src/[s-z]*.c: Change license to BSD-2

Updates #3053.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSImplClassicScheduler
 *
 * @brief This source file contains the implementation of
 *   rtems_scheduler_remove_processor().
 */

/*
 * Copyright (c) 2016 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/rtems/scheduler.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/config.h>

#if defined(RTEMS_SMP)
typedef struct {
  const Scheduler_Control *scheduler;
  rtems_status_code        status;
} Scheduler_Processor_removal_context;

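/*
 * Visitor for _Thread_Iterate(): records RTEMS_RESOURCE_IN_USE in the
 * iteration context if a non-idle thread has this scheduler instance as its
 * home scheduler while its processor affinity has no overlap with the
 * processors still owned by the instance.  The iteration stops as soon as
 * such a conflict was recorded.
 */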
static bool _Scheduler_Check_processor_not_required(
  Thread_Control *the_thread,
  void           *arg
)
{
  Scheduler_Processor_removal_context *iter_context;
  Thread_queue_Context                 queue_context;
  ISR_lock_Context                     state_context;

  if ( the_thread->is_idle ) {
    return false;
  }

  iter_context = arg;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_State_acquire_critical( the_thread, &state_context );

  if (
    _Thread_Scheduler_get_home( the_thread ) == iter_context->scheduler
      && !_Processor_mask_Has_overlap(
        &the_thread->Scheduler.Affinity,
        _Scheduler_Get_processors( iter_context->scheduler )
      )
  ) {
    iter_context->status = RTEMS_RESOURCE_IN_USE;
  }

  _Thread_State_release_critical( the_thread, &state_context );
  _Thread_Wait_release( the_thread, &queue_context );
  return iter_context->status != RTEMS_SUCCESSFUL;
}

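/*
 * Visitor for _Thread_Iterate(): records RTEMS_RESOURCE_IN_USE in the
 * iteration context if one of the scheduler nodes of a non-idle thread
 * belongs to this scheduler instance, i.e. the thread still uses the
 * instance as its home or as a helping scheduler.
 */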
static bool _Scheduler_Check_no_helping(
  Thread_Control *the_thread,
  void           *arg
)
{
  Scheduler_Processor_removal_context *iter_context;
  ISR_lock_Context                     lock_context;
  const Chain_Node                    *node;
  const Chain_Node                    *tail;

  if ( the_thread->is_idle ) {
    return false;
  }

  iter_context = arg;

  _Thread_State_acquire( the_thread, &lock_context );
  node = _Chain_Immutable_first( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    const Scheduler_Node    *scheduler_node;
    const Scheduler_Control *scheduler;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    if ( scheduler == iter_context->scheduler ) {
      iter_context->status = RTEMS_RESOURCE_IN_USE;
      break;
    }

    node = _Chain_Immutable_next( node );
  } while ( node != tail );

  _Thread_State_release( the_thread, &lock_context );
  return iter_context->status != RTEMS_SUCCESSFUL;
}
#endif

rtems_status_code rtems_scheduler_remove_processor(
  rtems_id scheduler_id,
  uint32_t cpu_index
)
{
  const Scheduler_Control             *scheduler;
#if defined(RTEMS_SMP)
  Scheduler_Processor_removal_context  iter_context;
  ISR_lock_Context                     lock_context;
  Scheduler_Context                   *scheduler_context;
  Per_CPU_Control                     *cpu;
  Per_CPU_Control                     *cpu_self;
#endif

  scheduler = _Scheduler_Get_by_id( scheduler_id );
  if ( scheduler == NULL ) {
    return RTEMS_INVALID_ID;
  }

  if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
    return RTEMS_INVALID_NUMBER;
  }

#if defined(RTEMS_SMP)
  iter_context.scheduler = scheduler;
  iter_context.status = RTEMS_SUCCESSFUL;
  scheduler_context = _Scheduler_Get_context( scheduler );
  cpu = _Per_CPU_Get_by_index( cpu_index );

  _Objects_Allocator_lock();

  if ( cpu->Scheduler.control != scheduler ) {
    _Objects_Allocator_unlock();
    return RTEMS_INVALID_NUMBER;
  }

  /*
   * This prevents the selection of this scheduler instance by new threads in
   * case the processor count changes to zero.
   */
  _ISR_lock_ISR_disable( &lock_context );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  _Processor_mask_Clear( &scheduler_context->Processors, cpu_index );
  _Scheduler_Release_critical( scheduler, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );

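  /*
   * Check that no thread depends on the processor to be removed: a thread
   * with this scheduler instance as its home must still have affinity to at
   * least one of the remaining processors, and, if the instance would be
   * left without processors, no thread may still use it through a home or
   * helping scheduler node.
   */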
  _Thread_Iterate( _Scheduler_Check_processor_not_required, &iter_context );

  if (
    _Processor_mask_Is_zero( &scheduler_context->Processors ) &&
    iter_context.status == RTEMS_SUCCESSFUL
  ) {
    _Thread_Iterate( _Scheduler_Check_no_helping, &iter_context );
  }

  _ISR_lock_ISR_disable( &lock_context );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  if ( iter_context.status == RTEMS_SUCCESSFUL ) {
    Thread_Control *idle;
    Scheduler_Node *scheduler_node;

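    /*
     * Nothing depends on the processor: detach it from the scheduler
     * instance, keep the idle thread handed back by the scheduler operation
     * for the now unused processor, and unlink the idle thread's home
     * scheduler node from its wait and scheduler node chains.
     */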
    cpu->Scheduler.control = NULL;
    cpu->Scheduler.context = NULL;
    idle = ( *scheduler->Operations.remove_processor )( scheduler, cpu );
    cpu->Scheduler.idle_if_online_and_unused = idle;

    scheduler_node = _Thread_Scheduler_get_home_node( idle );
    _Priority_Plain_extract(
      &scheduler_node->Wait.Priority,
      &idle->Real_priority
    );
    _Assert( _Priority_Is_empty( &scheduler_node->Wait.Priority ) );
    _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
    _Chain_Extract_unprotected( &scheduler_node->Thread.Scheduler_node.Chain );
    _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
  } else {
    _Processor_mask_Set( &scheduler_context->Processors, cpu_index );
  }

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _Scheduler_Release_critical( scheduler, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
  _Thread_Dispatch_direct( cpu_self );
  _Objects_Allocator_unlock();
  return iter_context.status;
#else
  return RTEMS_RESOURCE_IN_USE;
#endif
}
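
For reference, a minimal usage sketch of the directive implemented above. The scheduler name 'APP ' and the processor index 1 are assumptions made for illustration only; an application would use the names and indices from its own scheduler configuration.

#include <rtems.h>

/* Hypothetical helper: detach processor 1 from a scheduler named "APP ". */
static rtems_status_code remove_processor_from_app_scheduler( void )
{
  rtems_status_code sc;
  rtems_id          scheduler_id;

  /* Look up the scheduler instance by its configured name. */
  sc = rtems_scheduler_ident(
    rtems_build_name( 'A', 'P', 'P', ' ' ),
    &scheduler_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return sc;
  }

  /*
   * Try to take processor 1 away from the instance.  RTEMS_RESOURCE_IN_USE
   * means a thread still depends on the processor, as checked by the
   * iterations above, so it cannot be removed right now.
   */
  return rtems_scheduler_remove_processor( scheduler_id, 1 );
}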