@@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	int ret;
+
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
@@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 	lock->magic = NULL;
 #endif
 }
-
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
@@ -1632,14 +1653,15 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  *
  * Initializing of a locked rt lock is not allowed
  */
-void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name,
+		     struct lock_class_key *key)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
 	lock->waiters = RB_ROOT;
 	lock->waiters_leftmost = NULL;
 
-	debug_rt_mutex_init(lock, name);
+	debug_rt_mutex_init(lock, name, key);
 }
 EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
@@ -1660,7 +1682,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	__rt_mutex_init(lock, NULL, NULL);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 }
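
For context: mutex_acquire() and mutex_release() are the lockdep annotation hooks, and the hunks above assume the rest of the series gives struct rt_mutex a dep_map member and teaches debug_rt_mutex_init() to register the new name/key pair with lockdep. A minimal sketch of what the caller-facing rt_mutex_init() wrapper could look like so that each static call site gets its own lock class is shown below; the exact macro body is an assumption here, modeled on how mutex_init() handles ordinary mutexes, not a quote from this patch.

/*
 * Sketch only (assumed header-side counterpart): allocate one static
 * lock_class_key per rt_mutex_init() call site and hand it, together
 * with a name, to the extended __rt_mutex_init().
 */
#define rt_mutex_init(mutex)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__rt_mutex_init(mutex, __func__, &__key);		\
} while (0)

With a per-call-site key, lockdep can then distinguish rt_mutexes initialized at different places and report inversions between them, while rt_mutex_init_proxy_locked() passes NULL/NULL because proxy locks (e.g. futex PI state) are not tracked by lockdep in this series.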