@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 	rt_mutex_postunlock(&wake_q);
 }
 
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+	__rt_mutex_lock(lock, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+#endif
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	__rt_mutex_lock(lock, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible