@@ -938,8 +938,6 @@ takeit:
 	 */
 	rt_mutex_set_owner(lock, task);
 
-	rt_mutex_deadlock_account_lock(lock, task);
-
 	return 1;
 }
 
@@ -1342,8 +1340,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 
 	debug_rt_mutex_unlock(lock);
 
-	rt_mutex_deadlock_account_unlock(current);
-
 	/*
 	 * We must be careful here if the fast path is enabled. If we
 	 * have no waiters queued we cannot set owner to NULL here
@@ -1409,11 +1405,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
 				enum rtmutex_chainwalk chwalk))
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
-		rt_mutex_deadlock_account_lock(lock, current);
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
-	} else
-		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+
+	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
@@ -1425,21 +1420,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			enum rtmutex_chainwalk chwalk))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
-		rt_mutex_deadlock_account_lock(lock, current);
+	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
-	} else
-		return slowfn(lock, state, timeout, chwalk);
+
+	return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
 		     int (*slowfn)(struct rt_mutex *lock))
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
-		rt_mutex_deadlock_account_lock(lock, current);
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 1;
-	}
+
 	return slowfn(lock);
 }
 
@@ -1449,19 +1442,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 		   struct wake_q_head *wqh))
 {
 	DEFINE_WAKE_Q(wake_q);
+	bool deboost;
 
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
-		rt_mutex_deadlock_account_unlock(current);
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+		return;
 
-	} else {
-		bool deboost = slowfn(lock, &wake_q);
+	deboost = slowfn(lock, &wake_q);
 
-		wake_up_q(&wake_q);
+	wake_up_q(&wake_q);
 
-		/* Undo pi boosting if necessary: */
-		if (deboost)
-			rt_mutex_adjust_prio(current);
-	}
+	/* Undo pi boosting if necessary: */
+	if (deboost)
+		rt_mutex_adjust_prio(current);
 }
 
 /**
@@ -1572,10 +1564,9 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
 				   struct wake_q_head *wqh)
 {
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
-		rt_mutex_deadlock_account_unlock(current);
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 		return false;
-	}
+
 	return rt_mutex_slowunlock(lock, wqh);
 }
 
@@ -1637,7 +1628,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 	__rt_mutex_init(lock, NULL);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
-	rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }
 
 /**
@@ -1657,7 +1647,6 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 {
 	debug_rt_mutex_proxy_unlock(lock);
 	rt_mutex_set_owner(lock, NULL);
-	rt_mutex_deadlock_account_unlock(proxy_owner);
 }
 
 /**
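
Not part of the patch itself: with the accounting calls gone, every fast path above reduces to a single cmpxchg of the owner field, falling through to the slow function on contention. Below is a minimal, self-contained sketch of that shape using C11 atomics; the names (demo_mutex, demo_lock, demo_slowlock, demo_unlock) and the userspace atomics are stand-ins for illustration only, not the kernel's struct rt_mutex or rt_mutex_cmpxchg_acquire()/_release() helpers.

/*
 * Illustrative sketch (not kernel code): cmpxchg fast path with a
 * slow-path fallback, mirroring the structure of the simplified
 * rt_mutex fast paths. All identifiers here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
	/* NULL means "unowned"; otherwise points at the owning "task". */
	_Atomic(void *) owner;
};

/* Stand-in for the contended slow path (waiter queueing, PI handling, ...). */
static int demo_slowlock(struct demo_mutex *lock, void *task)
{
	(void)lock;
	(void)task;
	return -1;	/* a real slow path would block; we just report contention */
}

/* Fast path: one acquire cmpxchg, then a tail call into the slow path. */
static int demo_lock(struct demo_mutex *lock, void *task)
{
	void *expected = NULL;

	if (atomic_compare_exchange_strong_explicit(&lock->owner, &expected,
						    task, memory_order_acquire,
						    memory_order_relaxed))
		return 0;

	return demo_slowlock(lock, task);
}

/* Matching release-side fast path: cmpxchg the owner back to NULL. */
static int demo_unlock(struct demo_mutex *lock, void *task)
{
	void *expected = task;

	return atomic_compare_exchange_strong_explicit(&lock->owner, &expected,
						       NULL, memory_order_release,
						       memory_order_relaxed) ? 0 : -1;
}

int main(void)
{
	struct demo_mutex m = { .owner = NULL };
	int me;		/* its address serves as a fake task identity */

	printf("lock:   %d\n", demo_lock(&m, &me));	/* 0: fast path taken */
	printf("relock: %d\n", demo_lock(&m, &me));	/* -1: falls to slow path */
	printf("unlock: %d\n", demo_unlock(&m, &me));	/* 0: released */
	return 0;
}

The same shape is what rt_mutex_fastlock(), rt_mutex_fasttrylock() and rt_mutex_fastunlock() end up with after this cleanup: the uncontended case is a single atomic operation, and waiter queueing, wakeups and PI deboosting stay in the slow functions.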