source: rtems/cpukit/score/include/rtems/score/mrspimpl.h @ 114e408

5
Last change on this file since 114e408 was 114e408, checked in by Sebastian Huber <sebastian.huber@…>, on 08/22/16 at 11:17:05

score: Simplify thread queue acquire/release

  • Property mode set to 100644
File size: 12.6 KB
Line 
1/*
2 * Copyright (c) 2014, 2016 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifndef _RTEMS_SCORE_MRSPIMPL_H
16#define _RTEMS_SCORE_MRSPIMPL_H
17
18#include <rtems/score/mrsp.h>
19
20#if defined(RTEMS_SMP)
21
22#include <rtems/score/assert.h>
23#include <rtems/score/chainimpl.h>
24#include <rtems/score/resourceimpl.h>
25#include <rtems/score/schedulerimpl.h>
26#include <rtems/score/status.h>
27#include <rtems/score/threadqimpl.h>
28#include <rtems/score/watchdogimpl.h>
29#include <rtems/score/wkspace.h>
30
31#ifdef __cplusplus
32extern "C" {
33#endif /* __cplusplus */
34
35/**
36 * @addtogroup ScoreMRSP
37 *
38 * @{
39 */
40
41/**
42 * @brief Internal state used for MRSP_Rival::status to indicate that this
43 * rival waits for resource ownership.
44 */
45#define MRSP_WAIT_FOR_OWNERSHIP STATUS_MINUS_ONE
46
47/*
48 * FIXME: Operations with the resource dependency tree are protected by the
49 * global scheduler lock.  Since the scheduler lock should be scheduler
50 * instance specific in the future this will only work temporarily.  A more
51 * sophisticated locking strategy is necessary.
52 */
53
54RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context )
55{
56  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
57}
58
59RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
60{
61  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
62}
63
64RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
65  MRSP_Control         *mrsp,
66  Thread_queue_Context *queue_context
67)
68{
69  _Thread_queue_Acquire_critical( &mrsp->Wait_queue, queue_context );
70}
71
72RTEMS_INLINE_ROUTINE void _MRSP_Release(
73  MRSP_Control         *mrsp,
74  Thread_queue_Context *queue_context
75)
76{
77  _Thread_queue_Release( &mrsp->Wait_queue, queue_context );
78}
79
80RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter(
81  Thread_Control   *thread,
82  Priority_Control *new_priority,
83  void             *arg
84)
85{
86  *new_priority = _Thread_Priority_highest(
87    thread->real_priority,
88    *new_priority
89  );
90
91  return *new_priority != thread->current_priority;
92}
93
94RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority(
95  Thread_Control   *thread,
96  Priority_Control  initial_priority
97)
98{
99  /*
100   * The Thread_Control::resource_count is used by the normal priority ceiling
101   * or priority inheritance semaphores.
102   */
103  if ( thread->resource_count == 0 ) {
104    _Thread_Change_priority(
105      thread,
106      initial_priority,
107      NULL,
108      _MRSP_Restore_priority_filter,
109      true
110    );
111  }
112}
113
/**
 * @brief Makes @a new_owner the owner of an uncontended MrsP resource.
 *
 * Must be called with the MrsP wait queue acquired (see _MRSP_Seize()); the
 * wait queue is released before returning.  The owner's priority is raised
 * to the ceiling priority with thread dispatching disabled, so the change
 * takes effect before any dispatch can occur.
 */
RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
  MRSP_Control         *mrsp,
  Thread_Control       *new_owner,
  Priority_Control      initial_priority,
  Priority_Control      ceiling_priority,
  Thread_queue_Context *queue_context
)
{
  Per_CPU_Control *cpu_self;

  /* Link the resource into the owner's resource set and record ownership */
  _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
  _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
  /* Remember the pre-ceiling priority so _MRSP_Surrender() can restore it */
  mrsp->initial_priority_of_owner = initial_priority;
  _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );

  /* Disable dispatching before dropping the wait queue lock so the priority
     raise below happens before a dispatch is possible */
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _MRSP_Release( mrsp, queue_context );

  _Thread_Raise_priority( new_owner, ceiling_priority );

  _Thread_Dispatch_enable( cpu_self );
}
136
137RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
138  MRSP_Control            *mrsp,
139  const Scheduler_Control *scheduler,
140  Priority_Control         ceiling_priority,
141  Thread_Control          *executing,
142  bool                     initially_locked
143)
144{
145  uint32_t scheduler_count = _Scheduler_Count;
146  uint32_t i;
147
148  if ( initially_locked ) {
149    return STATUS_INVALID_NUMBER;
150  }
151
152  mrsp->ceiling_priorities = _Workspace_Allocate(
153    sizeof( *mrsp->ceiling_priorities ) * scheduler_count
154  );
155  if ( mrsp->ceiling_priorities == NULL ) {
156    return STATUS_NO_MEMORY;
157  }
158
159  for ( i = 0 ; i < scheduler_count ; ++i ) {
160    const Scheduler_Control *scheduler_of_cpu;
161
162    scheduler_of_cpu = _Scheduler_Get_by_CPU_index( i );
163
164    if ( scheduler != scheduler_of_cpu ) {
165      mrsp->ceiling_priorities[ i ] =
166        _Scheduler_Map_priority( scheduler_of_cpu, 0 );
167    } else {
168      mrsp->ceiling_priorities[ i ] = ceiling_priority;
169    }
170  }
171
172  _Resource_Initialize( &mrsp->Resource );
173  _Chain_Initialize_empty( &mrsp->Rivals );
174  _Thread_queue_Initialize( &mrsp->Wait_queue );
175
176  return STATUS_SUCCESSFUL;
177}
178
179RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
180  const MRSP_Control      *mrsp,
181  const Scheduler_Control *scheduler
182)
183{
184  uint32_t scheduler_index;
185
186  scheduler_index = _Scheduler_Get_index( scheduler );
187  return mrsp->ceiling_priorities[ scheduler_index ];
188}
189
190RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
191  MRSP_Control            *mrsp,
192  const Scheduler_Control *scheduler,
193  Priority_Control         new_priority
194)
195{
196  uint32_t scheduler_index;
197
198  scheduler_index = _Scheduler_Get_index( scheduler );
199  mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
200}
201
/**
 * @brief Watchdog callout invoked when a rival's wait for ownership times
 * out.
 *
 * Recovers the MRSP_Rival from the embedded watchdog.  If the rival still
 * waits for ownership, it is removed from the rival chain and the resource
 * dependency tree, its scheduler help state and resource root are restored,
 * and its status is set to STATUS_TIMEOUT, which terminates the busy-wait
 * loop in _MRSP_Wait_for_ownership().  Otherwise ownership was already
 * transferred by _MRSP_Surrender() and nothing is done.
 */
RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
{
  MRSP_Rival *rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
  MRSP_Control *mrsp = rival->resource;
  Thread_Control *thread = rival->thread;
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _ISR_lock_ISR_disable( &queue_context.Lock_context );
  _MRSP_Acquire_critical( mrsp, &queue_context );

  if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
    ISR_lock_Context giant_lock_context;

    /* The resource dependency tree is protected by the global scheduler
       lock, see the FIXME above */
    _MRSP_Giant_acquire( &giant_lock_context );

    _Chain_Extract_unprotected( &rival->Node );
    _Resource_Node_extract( &thread->Resource_node );
    _Resource_Node_set_dependency( &thread->Resource_node, NULL );
    /* Undo the help state change made in _MRSP_Wait_for_ownership() */
    _Scheduler_Thread_change_help_state( thread, rival->initial_help_state );
    _Scheduler_Thread_change_resource_root( thread, thread );

    _MRSP_Giant_release( &giant_lock_context );

    /* Written inside the critical section so that _MRSP_Surrender() cannot
       concurrently hand over ownership; read by the busy-wait loop in
       _MRSP_Wait_for_ownership() */
    rival->status = STATUS_TIMEOUT;

    _MRSP_Release( mrsp, &queue_context );
  } else {
    _MRSP_Release( mrsp, &queue_context );
  }
}
233
/**
 * @brief Waits for ownership of a MrsP resource held by another thread.
 *
 * Must be called with the MrsP wait queue acquired (see _MRSP_Seize()); the
 * wait queue is released before the wait begins.  A stack-allocated
 * MRSP_Rival node is linked into the rival chain and the resource
 * dependency tree; the thread then busy-waits at the ceiling priority until
 * _MRSP_Surrender() or _MRSP_Timeout() changes the rival status.
 *
 * @retval STATUS_SUCCESSFUL Ownership was transferred to the executing
 *   thread by _MRSP_Surrender().
 * @retval STATUS_TIMEOUT The relative timeout expired first, see
 *   _MRSP_Timeout().
 */
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
  MRSP_Control         *mrsp,
  Resource_Node        *owner,
  Thread_Control       *executing,
  Priority_Control      initial_priority,
  Priority_Control      ceiling_priority,
  Thread_queue_Context *queue_context
)
{
  Status_Control status;
  MRSP_Rival rival;
  Thread_Life_state life_state;
  Per_CPU_Control *cpu_self;
  ISR_lock_Context giant_lock_context;
  ISR_Level level;
  Watchdog_Interval timeout = queue_context->timeout;
  _Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );

  rival.thread = executing;
  rival.resource = mrsp;
  /* Remembered so the new owner's priority can be restored on surrender */
  rival.initial_priority = initial_priority;

  /* The resource dependency tree is protected by the global scheduler lock,
     see the FIXME above */
  _MRSP_Giant_acquire( &giant_lock_context );

  /* Save the previous help state so _MRSP_Timeout() can restore it */
  rival.initial_help_state =
    _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
  rival.status = MRSP_WAIT_FOR_OWNERSHIP;

  _Chain_Initialize_node( &rival.Node );
  _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
  _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
  _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
  _Scheduler_Thread_change_resource_root(
    executing,
    THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) )
  );

  _MRSP_Giant_release( &giant_lock_context );

  /* Disable dispatching before dropping the wait queue lock so the priority
     raise and watchdog setup happen before a dispatch is possible */
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _MRSP_Release( mrsp, queue_context );

  _Thread_Raise_priority( executing, ceiling_priority );

  if ( timeout > 0 ) {
    /* Arm a relative per-CPU watchdog which fires _MRSP_Timeout() */
    _Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
    _Watchdog_Initialize( &rival.Watchdog, _MRSP_Timeout );
    _ISR_Local_disable( level );
    _Watchdog_Per_CPU_insert_relative( &rival.Watchdog, cpu_self, timeout );
    _ISR_Local_enable( level );
  }

  /* NOTE(review): presumably protects the thread against deletion while the
     stack-based rival node is linked into the chains — confirm */
  life_state = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
  _Thread_Dispatch_enable( cpu_self );

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  /* Wait for state change */
  do {
    status = rival.status;
  } while ( status == MRSP_WAIT_FOR_OWNERSHIP );

  _Thread_Set_life_protection( life_state );

  if ( timeout > 0 ) {
    /* Remove the watchdog; harmless if it already fired */
    _ISR_Local_disable( level );
    _Watchdog_Per_CPU_remove(
      &rival.Watchdog,
      cpu_self,
      &cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
    );
    _ISR_Local_enable( level );

    if ( status == STATUS_TIMEOUT ) {
      _MRSP_Restore_priority( executing, initial_priority );
    }
  }

  return status;
}
314
/**
 * @brief Seizes a MrsP resource for the executing thread.
 *
 * Must be called with interrupts disabled via the lock context embedded in
 * @a queue_context; the lock context is handed over to the callees and is
 * released on all paths.
 *
 * @retval STATUS_SUCCESSFUL The resource was free and ownership was claimed.
 * @retval STATUS_MUTEX_CEILING_VIOLATED The current priority of the
 *   executing thread is above the ceiling priority for its scheduler
 *   instance.
 * @retval STATUS_UNAVAILABLE The resource is owned and @a wait is false, or
 *   the executing thread's resource root already reaches the owner (nested
 *   access or deadlock).
 * @retval other Result of _MRSP_Wait_for_ownership().
 */
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
  MRSP_Control         *mrsp,
  Thread_Control       *executing,
  bool                  wait,
  Thread_queue_Context *queue_context
)
{
  Status_Control status;
  const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
  Priority_Control initial_priority = executing->current_priority;
  Priority_Control ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
  bool priority_ok = !_Thread_Priority_less_than(
    ceiling_priority,
    initial_priority
  );
  Resource_Node *owner;

  if ( !priority_ok) {
    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    return STATUS_MUTEX_CEILING_VIOLATED;
  }

  _MRSP_Acquire_critical( mrsp, queue_context );
  owner = _Resource_Get_owner( &mrsp->Resource );
  if ( owner == NULL ) {
    /* Uncontended: releases the wait queue internally */
    _MRSP_Claim_ownership(
      mrsp,
      executing,
      initial_priority,
      ceiling_priority,
      queue_context
    );
    status = STATUS_SUCCESSFUL;
  } else if (
    wait
      && _Resource_Node_get_root( owner ) != &executing->Resource_node
  ) {
    /* Contended: releases the wait queue internally */
    status = _MRSP_Wait_for_ownership(
      mrsp,
      owner,
      executing,
      initial_priority,
      ceiling_priority,
      queue_context
    );
  } else {
    _MRSP_Release( mrsp, queue_context );
    /* Not available, nested access or deadlock */
    status = STATUS_UNAVAILABLE;
  }

  return status;
}
368
/**
 * @brief Surrenders a MrsP resource held by the executing thread.
 *
 * Must be called with interrupts disabled via the lock context embedded in
 * @a queue_context; the lock context is released on all paths.  If rivals
 * are waiting, ownership is handed over to the first rival in FIFO order and
 * its busy-wait in _MRSP_Wait_for_ownership() is terminated by setting its
 * status to STATUS_SUCCESSFUL.
 *
 * @retval STATUS_SUCCESSFUL The resource was surrendered.
 * @retval STATUS_NOT_OWNER The executing thread does not own the resource.
 * @retval STATUS_RELEASE_ORDER_VIOLATION The resource is not the most
 *   recently obtained resource of the executing thread.
 */
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
  MRSP_Control         *mrsp,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Priority_Control initial_priority;
  Per_CPU_Control *cpu_self;
  ISR_lock_Context giant_lock_context;

  if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    return STATUS_NOT_OWNER;
  }

  /* MrsP resources must be released in LIFO order */
  if (
    !_Resource_Is_most_recently_obtained(
      &mrsp->Resource,
      &executing->Resource_node
    )
  ) {
    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    return STATUS_RELEASE_ORDER_VIOLATION;
  }

  /* Priority saved by _MRSP_Claim_ownership() or the rival handover below */
  initial_priority = mrsp->initial_priority_of_owner;

  _MRSP_Acquire_critical( mrsp, queue_context );

  /* The resource dependency tree is protected by the global scheduler lock,
     see the FIXME above */
  _MRSP_Giant_acquire( &giant_lock_context );

  _Resource_Extract( &mrsp->Resource );

  if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
    _Resource_Set_owner( &mrsp->Resource, NULL );
  } else {
    /* Hand over ownership to the first rival in FIFO order */
    MRSP_Rival *rival = (MRSP_Rival *)
      _Chain_Get_first_unprotected( &mrsp->Rivals );
    Thread_Control *new_owner;

    /*
     * This must be inside the critical section since the status prevents a
     * potential double extraction in _MRSP_Timeout().
     */
    rival->status = STATUS_SUCCESSFUL;

    new_owner = rival->thread;
    mrsp->initial_priority_of_owner = rival->initial_priority;
    _Resource_Node_extract( &new_owner->Resource_node );
    _Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
    _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
    _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
    _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
    _Scheduler_Thread_change_resource_root( new_owner, new_owner );
  }

  if ( !_Resource_Node_owns_resources( &executing->Resource_node ) ) {
    _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF );
  }

  _MRSP_Giant_release( &giant_lock_context );

  /* Disable dispatching before dropping the wait queue lock so the priority
     restore happens before a dispatch is possible */
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _MRSP_Release( mrsp, queue_context );

  _MRSP_Restore_priority( executing, initial_priority );

  _Thread_Dispatch_enable( cpu_self );

  return STATUS_SUCCESSFUL;
}
440
441RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
442{
443  if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
444    return STATUS_RESOURCE_IN_USE;
445  }
446
447  return STATUS_SUCCESSFUL;
448}
449
/**
 * @brief Destroys a MrsP control block.
 *
 * Expects the MrsP wait queue to be acquired by the caller; it is released
 * first, then the wait queue is destroyed and the ceiling priority table
 * allocated by _MRSP_Initialize() is returned to the workspace.
 */
RTEMS_INLINE_ROUTINE void _MRSP_Destroy(
  MRSP_Control         *mrsp,
  Thread_queue_Context *queue_context
)
{
  _MRSP_Release( mrsp, queue_context );
  _Thread_queue_Destroy( &mrsp->Wait_queue );
  _Workspace_Free( mrsp->ceiling_priorities );
}
459
460/** @} */
461
462#ifdef __cplusplus
463}
464#endif /* __cplusplus */
465
466#endif /* RTEMS_SMP */
467
468#endif /* _RTEMS_SCORE_MRSPIMPL_H */
Note: See TracBrowser for help on using the repository browser.