@@ -300,7 +300,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
  * of task. We do not use the spin_xx_mutex() variants here as we are
  * outside of the debug path.)
  */
-static void rt_mutex_adjust_prio(struct task_struct *task)
+void rt_mutex_adjust_prio(struct task_struct *task)
 {
 	unsigned long flags;
 
@@ -1247,13 +1247,12 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 }
 
 /*
- * Slow path to release a rt-mutex:
+ * Slow path to release a rt-mutex.
+ * Return whether the current task needs to undo a potential priority boosting.
  */
-static void __sched
-rt_mutex_slowunlock(struct rt_mutex *lock)
+static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+					struct wake_q_head *wake_q)
 {
-	WAKE_Q(wake_q);
-
 	raw_spin_lock(&lock->wait_lock);
 
 	debug_rt_mutex_unlock(lock);
@@ -1294,7 +1293,7 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
 		if (unlock_rt_mutex_safe(lock) == true)
-			return;
+			return false;
 		/* Relock the rtmutex and try again */
 		raw_spin_lock(&lock->wait_lock);
 	}
@@ -1305,13 +1304,12 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(&wake_q, lock);
+	mark_wakeup_next_waiter(wake_q, lock);
 
 	raw_spin_unlock(&lock->wait_lock);
-	wake_up_q(&wake_q);
 
-	/* Undo pi boosting if necessary: */
-	rt_mutex_adjust_prio(current);
+	/* check PI boosting */
+	return true;
 }
 
 /*
@@ -1362,12 +1360,23 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
-		    void (*slowfn)(struct rt_mutex *lock))
+		    bool (*slowfn)(struct rt_mutex *lock,
+				   struct wake_q_head *wqh))
 {
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+	WAKE_Q(wake_q);
+
+	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
 		rt_mutex_deadlock_account_unlock(current);
-	else
-		slowfn(lock);
+
+	} else {
+		bool deboost = slowfn(lock, &wake_q);
+
+		wake_up_q(&wake_q);
+
+		/* Undo pi boosting if necessary: */
+		if (deboost)
+			rt_mutex_adjust_prio(current);
+	}
 }
 
 /**
@@ -1461,6 +1470,23 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+/**
+ * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+ * @lock: the rt_mutex to be unlocked
+ *
+ * Returns: true/false indicating whether priority adjustment is
+ * required or not.
+ */
+bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+				   struct wake_q_head *wqh)
+{
+	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+		rt_mutex_deadlock_account_unlock(current);
+		return false;
+	}
+	return rt_mutex_slowunlock(lock, wqh);
+}
+
 /**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
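
For reference, the calling convention introduced above (caller supplies a wake_q_head, the slow path returns whether a deboost is needed) is the same one rt_mutex_fastunlock() follows in this patch. A minimal, purely illustrative sketch of a caller of the new rt_mutex_futex_unlock(); the function name example_unlock_and_deboost and its surrounding locking context are hypothetical and not part of this patch:

/*
 * Illustrative sketch only -- not part of the patch. It mirrors
 * rt_mutex_fastunlock() above: collect the wakeup in a local wake_q,
 * perform the wakeup after the rtmutex internals have dropped
 * lock->wait_lock, and undo the PI boost only when asked to.
 */
static void example_unlock_and_deboost(struct rt_mutex *lock)
{
	WAKE_Q(wake_q);
	bool deboost;

	/* Returns true if the current task may need to be deboosted. */
	deboost = rt_mutex_futex_unlock(lock, &wake_q);

	/* Wake the queued top waiter; a no-op if the fast path was taken. */
	wake_up_q(&wake_q);

	/* Undo pi boosting if necessary: */
	if (deboost)
		rt_mutex_adjust_prio(current);
}

The point of returning the deboost decision instead of calling rt_mutex_adjust_prio() inside rt_mutex_slowunlock() is that the caller can first drop its own locks and issue the wakeup, exactly as rt_mutex_fastunlock() does above.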