@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2)	Drop lock->wait_lock
  * 3)	Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 
 	clear_rt_mutex_waiters(lock);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	/*
 	 * If a new waiter comes in between the unlock and the cmpxchg
 	 * we have two situations:
@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	lock->owner = NULL;
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	return true;
 }
 #endif
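
The two versions of unlock_rt_mutex_safe() above implement the release scheme
from the comment (steps 2 and 3): wait_lock is dropped first, and only then is
the owner cleared with cmpxchg, so a waiter that sneaks in between the two
steps makes the cmpxchg fail and forces the caller to retry. The new flags
argument exists because the helper unlocks on its caller's behalf and must
restore the interrupt state the caller saved. A minimal sketch of the final
step (illustrative only, hypothetical helper name, not part of the patch):

/* Sketch: clears the owner only if no new waiter set the
 * RT_MUTEX_HAS_WAITERS bit after wait_lock was dropped above.
 */
static bool release_owner_lockless(struct rt_mutex *lock,
				   struct task_struct *owner)
{
	return cmpxchg(&lock->owner, owner, NULL) == owner;
}
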
@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	int ret = 0, depth = 0;
 	struct rt_mutex *lock;
 	bool detect_deadlock;
-	unsigned long flags;
 	bool requeue = true;
 
 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/*
 	 * [1] Task cannot go away as we did a get_task() before !
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irq(&task->pi_lock);
 
 	/*
 	 * [2] Get the waiter on which @task is blocked on.
@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * operations.
 	 */
 	if (!raw_spin_trylock(&lock->wait_lock)) {
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irq(&task->pi_lock);
 		cpu_relax();
 		goto retry;
 	}
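
The trylock/cpu_relax() retry above is needed because the chain walk takes
pi_lock before wait_lock, the reverse of the nesting used everywhere else.
Spinning unconditionally on wait_lock while holding pi_lock could deadlock
against a task locking in the normal order, so wait_lock is only ever
trylocked here, and on failure everything is dropped and retried. The pattern
as a stand-alone sketch (hypothetical function, not part of the patch):

static void lock_both(struct rt_mutex *lock, struct task_struct *task)
{
retry:
	raw_spin_lock_irq(&task->pi_lock);
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();	/* let the wait_lock holder make progress */
		goto retry;
	}
	/* both locks held here, interrupts disabled */
}
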
@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		/*
 		 * No requeue[7] here. Just release @task [8]
 		 */
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&task->pi_lock);
 		put_task_struct(task);
 
 		/*
@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 * If there is no owner of the lock, end of chain.
 		 */
 		if (!rt_mutex_owner(lock)) {
-			raw_spin_unlock(&lock->wait_lock);
+			raw_spin_unlock_irq(&lock->wait_lock);
 			return 0;
 		}
 
 		/* [10] Grab the next task, i.e. owner of @lock */
 		task = rt_mutex_owner(lock);
 		get_task_struct(task);
-		raw_spin_lock_irqsave(&task->pi_lock, flags);
+		raw_spin_lock(&task->pi_lock);
 
 		/*
 		 * No requeue [11] here. We just do deadlock detection.
@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		top_waiter = rt_mutex_top_waiter(lock);
 
 		/* [13] Drop locks */
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&task->pi_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 
 		/* If owner is not blocked, end of chain. */
 		if (!next_lock)
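
The asymmetric pair at [13] shows the discipline this patch establishes:
interrupts go off when the first lock of a section is taken and come back on
only when the last lock is dropped; every acquisition and release in between
uses the plain variants. For the common nesting in this file (wait_lock
outermost, pi_lock inside) that looks like this sketch (illustrative only):

static void nesting_example(struct rt_mutex *lock, struct task_struct *task)
{
	raw_spin_lock_irq(&lock->wait_lock);	/* first lock: irqs off */
	raw_spin_lock(&task->pi_lock);		/* nested: plain variant */
	/* ... pi chain updates ... */
	raw_spin_unlock(&task->pi_lock);	/* nested: plain variant */
	raw_spin_unlock_irq(&lock->wait_lock);	/* last unlock: irqs on */
}
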
@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	rt_mutex_enqueue(lock, waiter);
 
 	/* [8] Release the task */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 	put_task_struct(task);
 
 	/*
@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 */
 		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
 			wake_up_process(rt_mutex_top_waiter(lock)->task);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
 
 	/* [10] Grab the next task, i.e. the owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/* [11] requeue the pi waiters if necessary */
 	if (waiter == rt_mutex_top_waiter(lock)) {
@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* [13] Drop the locks */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&task->pi_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	/*
 	 * Make the actual exit decisions [12], based on the stored
@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	goto again;
 
  out_unlock_pi:
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
 	put_task_struct(task);
 
@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 				struct rt_mutex_waiter *waiter)
 {
-	unsigned long flags;
-
 	/*
 	 * Before testing whether we can acquire @lock, we set the
 	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 * case, but conditionals are more expensive than a redundant
 	 * store.
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 	task->pi_blocked_on = NULL;
 	/*
 	 * Finish the lock acquisition. @task is the new owner. If
@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 */
 	if (rt_mutex_has_waiters(lock))
 		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 takeit:
 	/* We got the lock. */
@@ -883,7 +882,7 @@ takeit:
  *
  * Prepare waiter and propagate pi chain
  *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	struct rt_mutex_waiter *top_waiter = waiter;
 	struct rt_mutex *next_lock;
 	int chain_walk = 0, res;
-	unsigned long flags;
 
 	/*
 	 * Early deadlock detection. We really don't want the task to
@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	if (owner == task)
 		return -EDEADLK;
 
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	task->pi_blocked_on = waiter;
 
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 	if (!owner)
 		return 0;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		rt_mutex_dequeue_pi(owner, top_waiter);
 		rt_mutex_enqueue_pi(owner, waiter);
@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	/* Store the lock on which owner is blocked or NULL */
 	next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 	/*
 	 * Even if full deadlock detection is on, if the owner is not
 	 * blocked itself, we can avoid finding this out in the chain
@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 */
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	return res;
 }
@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * Remove the top waiter from the current tasks pi waiter tree and
  * queue it up.
  *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 
 	waiter = rt_mutex_top_waiter(lock);
 
@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	 */
 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	wake_q_add(wake_q, waiter->task);
 }
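
mark_wakeup_next_waiter() does not wake the task itself; it only queues it on
the caller-supplied wake_q, so the actual wakeup can be issued after wait_lock
has been dropped and interrupts are enabled again. The calling pattern looks
roughly like this sketch (hypothetical wrapper, assuming the WAKE_Q()/
wake_up_q() API of this kernel generation; rt_mutex_slowunlock() and its
callers implement the real flow):

static void unlock_and_wake_sketch(struct rt_mutex *lock)
{
	WAKE_Q(wake_q);			/* on-stack wake queue */
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	mark_wakeup_next_waiter(&wake_q, lock);		/* queue only */
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	wake_up_q(&wake_q);		/* real wakeup, lock dropped */
}
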
@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. I must
  * have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mutex *lock,
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex *next_lock;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 	rt_mutex_dequeue(lock, waiter);
 	current->pi_blocked_on = NULL;
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	/*
 	 * Only update priority if the waiter was the highest priority
@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	if (!owner || !is_top_waiter)
 		return;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 
 	rt_mutex_dequeue_pi(owner, waiter);
 
@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* Store the lock on which owner is blocked or NULL */
 	next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 
 	/*
 	 * Don't walk the chain, if the owner task is not blocked
@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
 				   next_lock, NULL, current);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 }
 
 /*
@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
- *			or TASK_UNINTERRUPTIBLE)
+ *			 or TASK_UNINTERRUPTIBLE)
  * @timeout:		 the pre-initialized and started timer, or NULL for none
  * @waiter:		 the pre-initialized rt_mutex_waiter
  *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1129,13 +1125,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 			break;
 		}
 
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule();
 
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
 	}
 
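
The loop above is the standard wait-wake-retry shape, now using the _irq
variants since this path always runs with interrupts enabled: drop wait_lock
before sleeping, reacquire it afterwards, and re-arm the task state before the
exit conditions are re-checked. Its skeleton, as a sketch with the timeout and
signal handling elided:

	for (;;) {
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;				/* got the lock */
		/* ... timeout / signal checks may break out here ... */
		raw_spin_unlock_irq(&lock->wait_lock);	/* irqs back on */
		schedule();				/* sleep until woken */
		raw_spin_lock_irq(&lock->wait_lock);	/* irqs off again */
		set_current_state(state);	/* re-arm before re-check */
	}
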
@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  enum rtmutex_chainwalk chwalk)
 {
 	struct rt_mutex_waiter waiter;
+	unsigned long flags;
 	int ret = 0;
 
 	debug_rt_mutex_init_waiter(&waiter);
 	RB_CLEAR_NODE(&waiter.pi_tree_entry);
 	RB_CLEAR_NODE(&waiter.tree_entry);
 
-	raw_spin_lock(&lock->wait_lock);
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 		return 0;
 	}
 
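
The comment block added above is the crux of the patch: with the cmpxchg()
fast path compiled out, rt_mutex_slowlock() can be reached during early boot
while interrupts are still disabled, so the function must not finish with an
unconditional raw_spin_unlock_irq(). The difference between the two variants,
as a sketch:

	unsigned long flags;

	/* _irq: only correct when the caller has interrupts enabled */
	raw_spin_lock_irq(&lock->wait_lock);	/* disables interrupts */
	raw_spin_unlock_irq(&lock->wait_lock);	/* unconditionally re-enables */

	/* irqsave: preserves whatever interrupt state the caller was in */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
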
@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* Remove pending timer: */
 	if (unlikely(timeout))
@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  */
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+	unsigned long flags;
 	int ret;
 
 	/*
@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 		return 0;
 
 	/*
-	 * The mutex has currently no owner. Lock the wait lock and
-	 * try to acquire the lock.
+	 * The mutex has currently no owner. Lock the wait lock and try to
+	 * acquire the lock. We use irqsave here to support early boot calls.
 	 */
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	ret = try_to_take_rt_mutex(lock, current, NULL);
 
@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
 }
@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 					struct wake_q_head *wake_q)
 {
-	raw_spin_lock(&lock->wait_lock);
+	unsigned long flags;
+
+	/* irqsave required to support early boot calls */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	debug_rt_mutex_unlock(lock);
 
@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 */
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
-		if (unlock_rt_mutex_safe(lock) == true)
+		if (unlock_rt_mutex_safe(lock, flags) == true)
 			return false;
 		/* Relock the rtmutex and try again */
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	}
 
 	/*
@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 */
 	mark_wakeup_next_waiter(wake_q, lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* check PI boosting */
 	return true;
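
Note how flags is threaded through the retry loop above: unlock_rt_mutex_safe()
consumes the saved state when it drops wait_lock ahead of its cmpxchg, and
every failed attempt saves a fresh copy on relock. Reduced to a sketch, with
the wakeup logic elided:

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	while (!rt_mutex_has_waiters(lock)) {
		/* drops wait_lock and restores the saved irq state */
		if (unlock_rt_mutex_safe(lock, flags))
			return false;	/* owner cleared, nothing to wake */
		/* cmpxchg lost against a new waiter: save state and retry */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
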
@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	if (try_to_take_rt_mutex(lock, task, NULL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 1;
 	}
 
@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (unlikely(ret))
 		remove_waiter(lock, waiter);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	debug_rt_mutex_print_deadlock(waiter);
 
@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
 }