source: rtems/cpukit/score/include/rtems/score/coremuteximpl.h @ 77ff5599

Last change on this file since 77ff5599 was 77ff5599, checked in by Sebastian Huber <sebastian.huber@…>, on 06/10/16 at 06:48:54

score: Introduce map priority scheduler operation

Introduce map/unmap priority scheduler operations to translate thread
priority values between the user domain and the scheduler domain. Use the
map priority operation to validate the thread priority. The EDF schedulers
use this new operation to distinguish between normal priorities and
priorities obtained through a job release.

Update #2173.
Update #2556.

/**
 * @file
 *
 * @ingroup ScoreMutex
 *
 * @brief CORE Mutex Implementation
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_COREMUTEXIMPL_H
#define _RTEMS_SCORE_COREMUTEXIMPL_H

#include <rtems/score/coremutex.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreMutex
 */
/**@{**/

#define CORE_MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

RTEMS_INLINE_ROUTINE void _CORE_mutex_Initialize(
  CORE_mutex_Control *the_mutex
)
{
  _Thread_queue_Initialize( &the_mutex->Wait_queue );
}

RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
{
  _Thread_queue_Destroy( &the_mutex->Wait_queue );
}

RTEMS_INLINE_ROUTINE void _CORE_mutex_Acquire_critical(
  CORE_mutex_Control   *the_mutex,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Acquire_critical(
    &the_mutex->Wait_queue,
    &queue_context->Lock_context
  );
}

RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
  CORE_mutex_Control   *the_mutex,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Release(
    &the_mutex->Wait_queue,
    &queue_context->Lock_context
  );
}

RTEMS_INLINE_ROUTINE Thread_Control *_CORE_mutex_Get_owner(
  const CORE_mutex_Control *the_mutex
)
{
  return the_mutex->Wait_queue.Queue.owner;
}

/**
 * @brief Is mutex locked.
 *
 * This routine returns true if the mutex specified is locked and false
 * otherwise.
 *
 * @param[in] the_mutex is the mutex to check.
 *
 * @retval true The mutex is locked.
 * @retval false The mutex is not locked.
 */
RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_locked(
  const CORE_mutex_Control *the_mutex
)
{
  return _CORE_mutex_Get_owner( the_mutex ) != NULL;
}

Status_Control _CORE_mutex_Seize_slow(
  CORE_mutex_Control   *the_mutex,
  Thread_Control       *executing,
  Thread_Control       *owner,
  bool                  wait,
  Watchdog_Interval     timeout,
  Thread_queue_Context *queue_context
);

Status_Control _CORE_mutex_Seize_no_protocol_slow(
  CORE_mutex_Control            *the_mutex,
  const Thread_queue_Operations *operations,
  Thread_Control                *executing,
  bool                           wait,
  Watchdog_Interval              timeout,
  Thread_queue_Context          *queue_context
);

Status_Control _CORE_mutex_Surrender_slow(
  CORE_mutex_Control   *the_mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
);

RTEMS_INLINE_ROUTINE void _CORE_mutex_Set_owner(
  CORE_mutex_Control *the_mutex,
  Thread_Control     *owner
)
{
  the_mutex->Wait_queue.Queue.owner = owner;
}

RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
  const CORE_mutex_Control *the_mutex,
  const Thread_Control     *the_thread
)
{
  return _CORE_mutex_Get_owner( the_mutex ) == the_thread;
}

RTEMS_INLINE_ROUTINE void _CORE_mutex_Restore_priority(
  Thread_Control *executing
)
{
  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( !_Thread_Owns_resources( executing ) ) {
    /*
     * Ensure that the executing resource count is visible to all other
     * processors and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

    if ( executing->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( executing );
      _Thread_Dispatch_enable( cpu_self );
    }
  }
}

RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
  CORE_recursive_mutex_Control *the_mutex
)
{
  _CORE_mutex_Initialize( &the_mutex->Mutex );
  the_mutex->nest_level = 0;
}

RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_nested(
  CORE_recursive_mutex_Control *the_mutex
)
{
  ++the_mutex->nest_level;
  return STATUS_SUCCESSFUL;
}

RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize(
  CORE_recursive_mutex_Control  *the_mutex,
  Thread_Control                *executing,
  bool                           wait,
  Watchdog_Interval              timeout,
  Status_Control              ( *nested )( CORE_recursive_mutex_Control * ),
  Thread_queue_Context          *queue_context
)
{
  Thread_Control *owner;

  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );

  owner = _CORE_mutex_Get_owner( &the_mutex->Mutex );

  if ( owner == NULL ) {
    _CORE_mutex_Set_owner( &the_mutex->Mutex, executing );
    ++executing->resource_count;
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  if ( owner == executing ) {
    Status_Control status;

    status = ( *nested )( the_mutex );
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return status;
  }

  return _CORE_mutex_Seize_slow(
    &the_mutex->Mutex,
    executing,
    owner,
    wait,
    timeout,
    queue_context
  );
}
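
/*
 * Usage sketch for _CORE_recursive_mutex_Seize().  This is an illustration
 * only: the executing thread (_Thread_Executing), the interrupt disable
 * step (_ISR_lock_ISR_disable()) and the timeout value are assumptions
 * about the caller environment and are not provided by this header.  The
 * nested handler selects the recursion policy, here
 * _CORE_recursive_mutex_Seize_nested() to allow nested ownership:
 *
 *   Thread_queue_Context queue_context;
 *   Status_Control       status;
 *
 *   _ISR_lock_ISR_disable( &queue_context.Lock_context );
 *   status = _CORE_recursive_mutex_Seize(
 *     &the_mutex,
 *     _Thread_Executing,
 *     true,
 *     timeout,
 *     _CORE_recursive_mutex_Seize_nested,
 *     &queue_context
 *   );
 */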

RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
  CORE_recursive_mutex_Control *the_mutex,
  Thread_Control               *executing,
  Thread_queue_Context         *queue_context
)
{
  unsigned int        nest_level;
  Thread_queue_Heads *heads;
  bool                keep_priority;

  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );

  if ( !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing ) ) {
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_NOT_OWNER;
  }

  nest_level = the_mutex->nest_level;

  if ( nest_level > 0 ) {
    the_mutex->nest_level = nest_level - 1;
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  --executing->resource_count;
  _CORE_mutex_Set_owner( &the_mutex->Mutex, NULL );

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = the_mutex->Mutex.Wait_queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( heads == NULL && keep_priority ) {
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  return _CORE_mutex_Surrender_slow(
    &the_mutex->Mutex,
    executing,
    heads,
    keep_priority,
    queue_context
  );
}
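
/*
 * Usage sketch for _CORE_recursive_mutex_Surrender().  As above, the
 * interrupt disable step and queue context setup are assumptions about the
 * caller environment.  The routine returns STATUS_NOT_OWNER if the
 * executing thread does not own the mutex:
 *
 *   _ISR_lock_ISR_disable( &queue_context.Lock_context );
 *   status = _CORE_recursive_mutex_Surrender(
 *     &the_mutex,
 *     _Thread_Executing,
 *     &queue_context
 *   );
 */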

RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_no_protocol(
  CORE_recursive_mutex_Control  *the_mutex,
  const Thread_queue_Operations *operations,
  Thread_Control                *executing,
  bool                           wait,
  Watchdog_Interval              timeout,
  Status_Control              ( *nested )( CORE_recursive_mutex_Control * ),
  Thread_queue_Context          *queue_context
)
{
  Thread_Control *owner;

  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );

  owner = _CORE_mutex_Get_owner( &the_mutex->Mutex );

  if ( owner == NULL ) {
    _CORE_mutex_Set_owner( &the_mutex->Mutex, executing );
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  if ( owner == executing ) {
    Status_Control status;

    status = ( *nested )( the_mutex );
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return status;
  }

  return _CORE_mutex_Seize_no_protocol_slow(
    &the_mutex->Mutex,
    operations,
    executing,
    wait,
    timeout,
    queue_context
  );
}

RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender_no_protocol(
  CORE_recursive_mutex_Control  *the_mutex,
  const Thread_queue_Operations *operations,
  Thread_Control                *executing,
  Thread_queue_Context          *queue_context
)
{
  unsigned int    nest_level;
  Thread_Control *new_owner;

  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );

  if ( !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing ) ) {
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_NOT_OWNER;
  }

  nest_level = the_mutex->nest_level;

  if ( nest_level > 0 ) {
    the_mutex->nest_level = nest_level - 1;
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  new_owner = _Thread_queue_First_locked(
    &the_mutex->Mutex.Wait_queue,
    operations
  );
  _CORE_mutex_Set_owner( &the_mutex->Mutex, new_owner );

  if ( new_owner == NULL ) {
    _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  _Thread_queue_Extract_critical(
    &the_mutex->Mutex.Wait_queue.Queue,
    operations,
    new_owner,
    queue_context
  );
  return STATUS_SUCCESSFUL;
}
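
/*
 * The no protocol variants take the thread queue operations as an explicit
 * parameter, so the caller chooses the discipline of the wait queue.  A
 * sketch, assuming the FIFO operations of the thread queue implementation
 * (_Thread_queue_Operations_FIFO) as an alternative to the priority
 * operations referenced by CORE_MUTEX_TQ_OPERATIONS above:
 *
 *   _ISR_lock_ISR_disable( &queue_context.Lock_context );
 *   status = _CORE_recursive_mutex_Surrender_no_protocol(
 *     &the_mutex,
 *     &_Thread_queue_Operations_FIFO,
 *     _Thread_Executing,
 *     &queue_context
 *   );
 */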

RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
  CORE_ceiling_mutex_Control *the_mutex,
  const Scheduler_Control    *scheduler,
  Priority_Control            priority_ceiling
)
{
  _CORE_recursive_mutex_Initialize( &the_mutex->Recursive );
  the_mutex->priority_ceiling = priority_ceiling;
#if defined(RTEMS_SMP)
  the_mutex->scheduler = scheduler;
#endif
}
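
/*
 * Initialization sketch for a ceiling mutex.  The scheduler argument shown
 * here reuses the helper already used below for the non-SMP case
 * (_Scheduler_Get_by_CPU_index( 0 )); a real caller would normally pass the
 * scheduler of the creating thread.  The ceiling is a Priority_Control
 * value:
 *
 *   _CORE_ceiling_mutex_Initialize(
 *     &the_mutex,
 *     _Scheduler_Get_by_CPU_index( 0 ),
 *     priority_ceiling
 *   );
 */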

RTEMS_INLINE_ROUTINE const Scheduler_Control *
_CORE_ceiling_mutex_Get_scheduler(
  const CORE_ceiling_mutex_Control *the_mutex
)
{
#if defined(RTEMS_SMP)
  return the_mutex->scheduler;
#else
  return _Scheduler_Get_by_CPU_index( 0 );
#endif
}

RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
  CORE_ceiling_mutex_Control *the_mutex,
  Priority_Control            priority_ceiling
)
{
  the_mutex->priority_ceiling = priority_ceiling;
}

RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
  const CORE_ceiling_mutex_Control *the_mutex
)
{
  return the_mutex->priority_ceiling;
}
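
/*
 * Core priority values are ordered so that a numerically lower value means
 * a logically higher priority.  The check below therefore rejects an owner
 * whose current priority is already above the ceiling
 * (current_priority < priority_ceiling) with STATUS_MUTEX_CEILING_VIOLATED
 * and raises the owner to the ceiling only if its current priority is
 * numerically greater than the ceiling.
 */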
RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
  CORE_ceiling_mutex_Control *the_mutex,
  Thread_Control             *owner,
  Thread_queue_Context       *queue_context
)
{
  Priority_Control  priority_ceiling;
  Priority_Control  current_priority;
  Per_CPU_Control  *cpu_self;

  priority_ceiling = the_mutex->priority_ceiling;
  current_priority = owner->current_priority;

  if ( current_priority < priority_ceiling ) {
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
    return STATUS_MUTEX_CEILING_VIOLATED;
  }

  _CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, owner );
  ++owner->resource_count;

  if ( current_priority == priority_ceiling ) {
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
  _Thread_Raise_priority( owner, priority_ceiling );
  _Thread_Dispatch_enable( cpu_self );
  return STATUS_SUCCESSFUL;
}

RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Seize(
  CORE_ceiling_mutex_Control    *the_mutex,
  Thread_Control                *executing,
  bool                           wait,
  Watchdog_Interval              timeout,
  Status_Control              ( *nested )( CORE_recursive_mutex_Control * ),
  Thread_queue_Context          *queue_context
)
{
  Thread_Control *owner;

  _CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );

  owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );

  if ( owner == NULL ) {
#if defined(RTEMS_SMP)
    if (
      _Scheduler_Get_own( executing )
        != _CORE_ceiling_mutex_Get_scheduler( the_mutex )
    ) {
      _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
      return STATUS_NOT_DEFINED;
    }
#endif

    return _CORE_ceiling_mutex_Set_owner(
      the_mutex,
      executing,
      queue_context
    );
  }

  if ( owner == executing ) {
    Status_Control status;

    status = ( *nested )( &the_mutex->Recursive );
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
    return status;
  }

  return _CORE_mutex_Seize_no_protocol_slow(
    &the_mutex->Recursive.Mutex,
    CORE_MUTEX_TQ_OPERATIONS,
    executing,
    wait,
    timeout,
    queue_context
  );
}

RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
  CORE_ceiling_mutex_Control *the_mutex,
  Thread_Control             *executing,
  Thread_queue_Context       *queue_context
)
{
  unsigned int    nest_level;
  Thread_Control *new_owner;

  _CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );

  if ( !_CORE_mutex_Is_owner( &the_mutex->Recursive.Mutex, executing ) ) {
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
    return STATUS_NOT_OWNER;
  }

  nest_level = the_mutex->Recursive.nest_level;

  if ( nest_level > 0 ) {
    the_mutex->Recursive.nest_level = nest_level - 1;
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  --executing->resource_count;

  new_owner = _Thread_queue_First_locked(
    &the_mutex->Recursive.Mutex.Wait_queue,
    CORE_MUTEX_TQ_OPERATIONS
  );
  _CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, new_owner );

  if ( new_owner != NULL ) {
    bool unblock;

    /*
     * We must extract the thread now since this will restore its default
     * thread lock.  This is necessary to avoid a deadlock in the
     * _Thread_Change_priority() below due to a recursive thread queue lock
     * acquire.
     */
    unblock = _Thread_queue_Extract_locked(
      &the_mutex->Recursive.Mutex.Wait_queue.Queue,
      CORE_MUTEX_TQ_OPERATIONS,
      new_owner,
      queue_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
    {
      ++new_owner->resource_count;
      _Thread_Raise_priority( new_owner, the_mutex->priority_ceiling );
    }

    _Thread_queue_Unblock_critical(
      unblock,
      &the_mutex->Recursive.Mutex.Wait_queue.Queue,
      new_owner,
      &queue_context->Lock_context
    );
  } else {
    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
  }

  _CORE_mutex_Restore_priority( executing );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */