@@ -67,8 +67,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -225,7 +224,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -746,7 +746,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +803,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);