/**
 * @file
 *
 * @brief Wait for Spinlock
 * @ingroup ScoreSpinlock
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */
---|
16 | |
---|
17 | #if HAVE_CONFIG_H |
---|
18 | #include "config.h" |
---|
19 | #endif |
---|
20 | |
---|
21 | #include <rtems/score/corespinlockimpl.h> |
---|
22 | #include <rtems/score/percpu.h> |
---|
23 | |
---|
/**
 * @brief Attempt to obtain the spinlock, optionally busy-waiting until it
 *   becomes available.
 *
 * Must be called with the spinlock's ISR lock already acquired via
 * @a lock_context; this routine releases it on every return path (and, on
 * the wait path, repeatedly releases and re-acquires it so other threads
 * and ISRs can run while we spin).
 *
 * @param[in,out] the_spinlock The spinlock control block to obtain.
 * @param[in] wait If true, spin until the lock is available; if false,
 *   return immediately when the lock is held by another thread.
 * @param[in] timeout Ticks until a timed wait expires; only honored when
 *   FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API is defined, otherwise
 *   the parameter is unused.
 * @param[in,out] lock_context The ISR lock context entered by the caller.
 *
 * @retval CORE_SPINLOCK_HOLDER_RELOCKING The executing thread already
 *   holds the spinlock.
 * @retval CORE_SPINLOCK_SUCCESSFUL The spinlock was obtained.
 * @retval CORE_SPINLOCK_UNAVAILABLE @a wait was false and the spinlock
 *   was held by another thread.
 */
CORE_spinlock_Status _CORE_spinlock_Seize(
  CORE_spinlock_Control *the_spinlock,
  bool                   wait,
  Watchdog_Interval      timeout,
  ISR_lock_Context      *lock_context
)
{
  Thread_Control *executing;

#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
  /* Absolute tick count at which a timed wait expires. */
  Watchdog_Interval limit = _Watchdog_Ticks_since_boot + timeout;
#endif

  executing = _Thread_Executing;

  _CORE_spinlock_Acquire_critical( the_spinlock, lock_context );

  /*
   * Relocking by the current holder would spin forever; report it
   * instead of deadlocking.
   */
  if ( the_spinlock->lock == CORE_SPINLOCK_LOCKED &&
       the_spinlock->holder == executing ) {
    _CORE_spinlock_Release( the_spinlock, lock_context );
    return CORE_SPINLOCK_HOLDER_RELOCKING;
  }

  /*
   * Count ourselves as a user so the spinlock cannot be deleted while
   * we are spinning on it.  Every return path below must undo this
   * unless we actually obtained the lock.
   */
  the_spinlock->users += 1;
  for ( ;; ) {
    /* Lock is free: claim it and record the new holder. */
    if ( the_spinlock->lock == CORE_SPINLOCK_UNLOCKED ) {
      the_spinlock->lock = CORE_SPINLOCK_LOCKED;
      the_spinlock->holder = executing;
      _CORE_spinlock_Release( the_spinlock, lock_context );
      return CORE_SPINLOCK_SUCCESSFUL;
    }

    /*
     * Spinlock is unavailable.  If not willing to wait, return.
     */
    if ( !wait ) {
      the_spinlock->users -= 1;
      _CORE_spinlock_Release( the_spinlock, lock_context );
      return CORE_SPINLOCK_UNAVAILABLE;
    }

#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    /*
     * They are willing to wait but there could be a timeout.
     */
    if ( timeout && (limit <= _Watchdog_Ticks_since_boot) ) {
      the_spinlock->users -= 1;
      _CORE_spinlock_Release( the_spinlock, lock_context );
      return CORE_SPINLOCK_TIMEOUT;
    }
#endif

    /*
     * The thread is willing to spin so let's set things up so
     * another thread has a chance of running.  This spinlock has
     * to be released by either another thread or an ISR.  Since
     * POSIX does not say anything about ISRs, that implies that
     * another thread must be able to run while spinning.  We are
     * not blocking so that implies we are at least preemptible
     * and possibly time-sliced.
     *
     * So first, we will enable interrupts to allow for them to happen.
     * Then we will "flash" the thread dispatching critical section
     * so other threads have a chance to run.
     *
     * A spinlock cannot be deleted while it is being used so we are
     * safe from deletion.
     */

    _CORE_spinlock_Release( the_spinlock, lock_context );

    /*
     * An ISR could occur here.  Another thread could get dispatched here.
     * Reenter the critical sections so we can attempt the lock again.
     */

    _ISR_lock_ISR_disable( lock_context );
    _CORE_spinlock_Acquire_critical( the_spinlock, lock_context );
  }

}
---|