/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01    /* Disable interrupts, save state */
#define HWLOCK_IRQ      0x02    /* Disable interrupts, don't save state */
#define HWLOCK_RAW      0x03    /* Don't disable interrupts or preemption */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
        int base_id;
};
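
/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * board with two hwspinlock banks of 32 locks each would give them
 * disjoint id ranges, so lock ids 0-31 map to the first bank and 32-63
 * to the second:
 *
 *      static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *      static struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 */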

#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                          unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code using these functions will still
 * build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
 * we _do_ want users to fail (no point in registering hwspinlock instances if
 * the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */

static inline struct hwspinlock *hwspin_lock_request(void)
{
        return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                          int mode, unsigned long *flags)
{
        return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        return 0;
}

#endif /* !CONFIG_HWSPINLOCK */
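
/*
 * Example (illustrative sketch): typical lifecycle of a dynamically
 * assigned lock. hwspin_lock_request() returns an ERR_PTR() value on
 * failure, so check it with IS_ERR() rather than against NULL:
 *
 *      struct hwspinlock *hwlock;
 *      int id;
 *
 *      hwlock = hwspin_lock_request();
 *      if (IS_ERR(hwlock))
 *              return PTR_ERR(hwlock);
 *
 *      id = hwspin_lock_get_id(hwlock); // publish the id to the remote core
 *      ...use the lock...
 *      hwspin_lock_free(hwlock);
 */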

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
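
/*
 * Example (illustrative sketch): a trylock/unlock pair. The unlock must
 * use the matching _irqrestore variant with the same @flags:
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(hwlock, &flags);
 *      if (ret)
 *              return ret;     // -EBUSY or -EINVAL
 *      ...short, non-sleeping critical section...
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */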

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: since raw mode does not disable preemption or interrupts, the
 * caller must serialize access to the hardware lock with a mutex or
 * spinlock to avoid deadlock. In return, this mode allows time-consuming
 * or sleepable operations to be performed under the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
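
/*
 * Example (illustrative sketch): raw mode paired with a caller-owned
 * mutex, as the Caution above requires. 'ctx' and its members are
 * hypothetical:
 *
 *      mutex_lock(&ctx->sw_lock);      // local serialization first
 *      ret = hwspin_trylock_raw(ctx->hwlock);
 *      if (ret) {
 *              mutex_unlock(&ctx->sw_lock);
 *              return ret;
 *      }
 *      ...sleepable or time-consuming work under the hardware lock...
 *      hwspin_unlock_raw(ctx->hwlock);
 *      mutex_unlock(&ctx->sw_lock);
 */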

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, 0, NULL);
}
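
/*
 * Example (illustrative sketch): opportunistic locking that backs off
 * instead of spinning when the remote side holds the lock:
 *
 *      ret = hwspin_trylock(hwlock);
 *      if (ret == -EBUSY)
 *              return ret;     // caller retries later, e.g. from a workqueue
 *      if (ret)
 *              return ret;     // -EINVAL: bad lock handle
 *      ...short, non-sleeping critical section...
 *      hwspin_unlock(hwlock);
 */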

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
                                unsigned int to, unsigned long *flags)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
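
/*
 * Example (illustrative sketch): a bounded wait with the irq state saved.
 * The 100 msec budget is an arbitrary illustrative value:
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *      if (ret == -ETIMEDOUT)
 *              return ret;     // remote core held the lock too long
 *      if (ret)
 *              return ret;
 *      ...short, non-sleeping critical section...
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */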

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: since raw mode does not disable preemption or interrupts, the
 * caller must serialize access to the hardware lock with a mutex or
 * spinlock to avoid deadlock. In return, this mode allows time-consuming
 * or sleepable operations to be performed under the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
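
/*
 * Example (illustrative sketch): guarding a shared-memory structure that
 * a remote processor also updates. 'shared' and HWLOCK_TIMEOUT_MS are
 * hypothetical names:
 *
 *      #define HWLOCK_TIMEOUT_MS       50
 *
 *      ret = hwspin_lock_timeout(hwlock, HWLOCK_TIMEOUT_MS);
 *      if (ret)
 *              return ret;
 *      shared->seq++;          // update visible to the remote core
 *      hwspin_unlock(hwlock);
 */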

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
                                unsigned long *flags)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */