/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff, only set that in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}

			return false;
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		if (handoff)
			flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
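
/*
 * Illustrative usage sketch, not part of this file: a mutex protecting a
 * simple counter in process context. The 'example_counter' structure and
 * the 'ec' pointer are hypothetical caller-side names; mutex_init(),
 * mutex_lock() and mutex_unlock() are the real API.
 *
 *	struct example_counter {
 *		struct mutex lock;
 *		unsigned long count;
 *	};
 *
 *	mutex_init(&ec->lock);
 *
 *	mutex_lock(&ec->lock);
 *	ec->count++;
 *	mutex_unlock(&ec->lock);
 *
 * mutex_lock() may sleep, so this pattern is only valid in contexts that are
 * allowed to sleep (not in interrupt context, not with spinlocks held), and
 * the unlock must be done by the task that took the lock.
 */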
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	struct task_struct *task = current;

	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);

			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner) {
			if (waiter && owner == task) {
				smp_mb(); /* ACQUIRE */
				break;
			}

			if (!mutex_spin_on_owner(lock, owner))
				goto fail_unlock;
		}

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, waiter))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
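
/*
 * Illustrative w/w usage sketch, not part of this file: acquiring two
 * ww_mutexes under one acquire context and backing off on -EDEADLK.
 * 'example_ww_class' and the objects 'a' and 'b' are hypothetical;
 * ww_acquire_init(), ww_mutex_lock(), ww_mutex_lock_slow(),
 * ww_acquire_done(), ww_mutex_unlock() and ww_acquire_fini() are the real
 * w/w API (see Documentation/locking/ww-mutex-design.txt). A real caller
 * keeps retrying as long as -EDEADLK is returned; only one back-off round
 * is shown here.
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *
 *	err = ww_mutex_lock(&a->lock, &ctx);
 *	if (!err)
 *		err = ww_mutex_lock(&b->lock, &ctx);
 *	if (err == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		err = ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	if (!err) {
 *		ww_acquire_done(&ctx);
 *		... use a and b ...
 *		ww_mutex_unlock(&b->lock);
 *		ww_mutex_unlock(&a->lock);
 *	}
 *	ww_acquire_fini(&ctx);
 */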
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
		     __mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
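
/*
 * Illustrative usage sketch, not part of this file: the interruptible
 * variant must have its return value checked, because a pending signal
 * aborts the wait with -EINTR; returning -ERESTARTSYS lets the syscall be
 * restarted. 'dev' and its 'lock' and 'users' members are hypothetical:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	dev->users++;
 *	mutex_unlock(&dev->lock);
 *	return 0;
 */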
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}
#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
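
/*
 * Illustrative usage sketch, not part of this file: mutex_trylock() suits
 * opportunistic work that should simply be skipped on contention rather than
 * sleep for the lock. Note the spin_trylock()-style return value: nonzero
 * means the lock was taken. 'cache' and example_shrink() are hypothetical;
 * if the trylock fails the work is skipped this round:
 *
 *	if (!mutex_trylock(&cache->lock))
 *		return;
 *	example_shrink(cache);
 *	mutex_unlock(&cache->lock);
 */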
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
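
/*
 * Illustrative usage sketch, not part of this file: the classic use of
 * atomic_dec_and_mutex_lock() is dropping the last reference to an object
 * that lives on a mutex-protected list, so the mutex is only taken when the
 * count might actually reach zero. 'obj', 'registry' and example_destroy()
 * are hypothetical:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &registry->lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&registry->lock);
 *		example_destroy(obj);
 *	}
 */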