glock.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
        struct gfs2_sbd *sdp;           /* incore superblock */
        struct rhashtable_iter hti;     /* rhashtable iterator */
        struct gfs2_glock *gl;          /* current glock struct */
        loff_t last_pos;                /* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
        .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
        .key_len = offsetofend(struct lm_lockname, ln_type),
        .key_offset = offsetof(struct gfs2_glock, gl_name),
        .head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
        struct lm_lockname *name;
        wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
                               int sync, void *key)
{
        struct wait_glock_queue *wait_glock =
                container_of(wait, struct wait_glock_queue, wait);
        struct lm_lockname *wait_name = wait_glock->name;
        struct lm_lockname *wake_name = key;

        if (wake_name->ln_sbd != wait_name->ln_sbd ||
            wake_name->ln_number != wait_name->ln_number ||
            wake_name->ln_type != wait_name->ln_type)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
        u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

        return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
        wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
        struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        } else {
                kfree(gl->gl_lksb.sb_lvbptr);
                kmem_cache_free(gfs2_glock_cachep, gl);
        }
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        BUG_ON(atomic_read(&gl->gl_revokes));
        rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
        smp_mb();
        wake_up_glock(gl);
        call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */
void gfs2_glock_hold(struct gfs2_glock *gl)
{
        GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
        lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
        if (!list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);

        list_del(&gl->gl_lru);
        list_add_tail(&gl->gl_lru, &lru_list);

        if (!test_bit(GLF_LRU, &gl->gl_flags)) {
                set_bit(GLF_LRU, &gl->gl_flags);
                atomic_inc(&lru_count);
        }

        spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);
        if (test_bit(GLF_LRU, &gl->gl_flags)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
        spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
                /*
                 * We are holding the lockref spinlock, and the work was still
                 * queued above. The queued work (glock_work_func) takes that
                 * spinlock before dropping its glock reference(s), so it
                 * cannot have dropped them in the meantime.
                 */
                GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
                gl->gl_lockref.count--;
        }
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
        spin_lock(&gl->gl_lockref.lock);
        __gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
}

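/*
 * Usage sketch (illustrative, not part of the original file): callers
 * elsewhere in this file that already hold the lockref spinlock take an
 * extra reference before queueing, so that the reference passed on to
 * the work queue is always valid:
 *
 *      spin_lock(&gl->gl_lockref.lock);
 *      gl->gl_lockref.count++;
 *      __gfs2_glock_queue_work(gl, 0);
 *      spin_unlock(&gl->gl_lockref.lock);
 */
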
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);

        lockref_mark_dead(&gl->gl_lockref);

        gfs2_glock_remove_from_lru(gl);
        spin_unlock(&gl->gl_lockref.lock);
        GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
        GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
        trace_gfs2_glock_put(gl);
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
        gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
        if (lockref_put_or_lock(&gl->gl_lockref))
                return;

        __gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);

        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}

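/*
 * Worked examples of the rules above (illustrative): a SH request on a
 * glock already held in SH is granted at once; an EX request, or any
 * request while the head of the queue wants EX, must wait its turn.
 * With GL_EXACT set, only an exact state match is granted; with
 * LM_FLAG_ANY set, any state other than UN will do.
 */
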
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_atomic();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail queued try locks
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                trace_gfs2_glock_queue(gh, 0);
                gfs2_holder_wake(gh);
        }
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_lockref.lock);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_lockref.lock);
                                if (ret) {
                                        if (ret == 1)
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        trace_gfs2_glock_queue(gh, 0);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                trace_gfs2_promote(gh, 1);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        trace_gfs2_promote(gh, 0);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                do_error(gl, 0);
                break;
        }
        return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
                        gl->gl_lockref.count++;
                else
                        gl->gl_lockref.count--;
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);

        if (new_state != gl->gl_target)
                /* shorten our minimum hold time */
                gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
                                       GL_GLOCK_MIN_HOLD);
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_atomic();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
        int rv;

        spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        pr_err("wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_lockref.lock);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        spin_unlock(&gl->gl_lockref.lock);
                        rv = glops->go_xmote_bh(gl, gh);
                        spin_lock(&gl->gl_lockref.lock);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                rv = do_promote(gl);
                if (rv == 2)
                        goto out_locked;
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
        int ret;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
            target != LM_ST_UNLOCKED)
                return;
        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        GLOCK_BUG_ON(gl, gl->gl_state == target);
        GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        gl->gl_req = target;
        set_bit(GLF_BLOCKING, &gl->gl_flags);
        if ((gl->gl_req == LM_ST_UNLOCKED) ||
            (gl->gl_state == LM_ST_EXCLUSIVE) ||
            (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                clear_bit(GLF_BLOCKING, &gl->gl_flags);
        spin_unlock(&gl->gl_lockref.lock);
        if (glops->go_sync)
                glops->go_sync(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                /* lock_dlm */
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
                        finish_xmote(gl, target);
                        gfs2_glock_queue_work(gl, 0);
                } else if (ret) {
                        pr_err("lm_lock ret %d\n", ret);
                        GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
                                                   &sdp->sd_flags));
                }
        } else { /* lock_nolock */
                finish_xmote(gl, target);
                gfs2_glock_queue_work(gl, 0);
        }

        spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_holder *gh = NULL;
        int ret;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out_unlock;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
                        goto out_unlock;
                if (ret == 2)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
out:
        return;

out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        gl->gl_lockref.count++;
        __gfs2_glock_queue_work(gl, 0);
        return;

out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        return;
}

static void delete_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;

        /* If someone's using this glock to create a new dinode, the block must
           have been freed by another node, then re-used, in which case our
           iopen callback is too late after the fact. Ignore it. */
        if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
                goto out;

        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (inode && !IS_ERR(inode)) {
                d_prune_aliases(inode);
                iput(inode);
        }
out:
        gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        unsigned int drop_refs = 1;

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_refs++;
        }
        spin_lock(&gl->gl_lockref.lock);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;

                holdtime = gl->gl_tchange + gl->gl_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;

                if (!delay) {
                        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
                        set_bit(GLF_DEMOTE, &gl->gl_flags);
                }
        }
        run_queue(gl, 0);
        if (delay) {
                /* Keep one glock reference for the work we requeue. */
                drop_refs--;
                if (gl->gl_name.ln_type != LM_TYPE_INODE)
                        delay = 0;
                __gfs2_glock_queue_work(gl, delay);
        }

        /*
         * Drop the remaining glock references manually here. (Mind that
         * __gfs2_glock_queue_work depends on the lockref spinlock being held
         * here as well.)
         */
        gl->gl_lockref.count -= drop_refs;
        if (!gl->gl_lockref.count) {
                __gfs2_glock_put(gl);
                return;
        }
        spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
                                            struct gfs2_glock *new)
{
        struct wait_glock_queue wait;
        wait_queue_head_t *wq = glock_waitqueue(name);
        struct gfs2_glock *gl;

        wait.name = name;
        init_wait(&wait.wait);
        wait.wait.func = glock_wake_function;

again:
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        rcu_read_lock();
        if (new) {
                gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
                        &new->gl_node, ht_parms);
                if (IS_ERR(gl))
                        goto out;
        } else {
                gl = rhashtable_lookup_fast(&gl_hash_table,
                        name, ht_parms);
        }
        if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
                rcu_read_unlock();
                schedule();
                goto again;
        }
out:
        rcu_read_unlock();
        finish_wait(wq, &wait.wait);
        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct super_block *s = sdp->sd_vfs;
        struct lm_lockname name = { .ln_number = number,
                                    .ln_type = glops->go_type,
                                    .ln_sbd = sdp };
        struct gfs2_glock *gl, *tmp;
        struct address_space *mapping;
        struct kmem_cache *cachep;
        int ret = 0;

        gl = find_insert_glock(&name, NULL);
        if (gl) {
                *glp = gl;
                return 0;
        }
        if (!create)
                return -ENOENT;

        if (glops->go_flags & GLOF_ASPACE)
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
        gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;

        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

        if (glops->go_flags & GLOF_LVB) {
                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
                }
        }

        atomic_inc(&sdp->sd_glock_disposal);
        gl->gl_node.next = NULL;
        gl->gl_flags = 0;
        gl->gl_name = name;
        gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_ops = glops;
        gl->gl_dstamp = 0;
        preempt_disable();
        /* We use the global stats to estimate the initial per-glock stats */
        gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
        preempt_enable();
        gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
        gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
        INIT_WORK(&gl->gl_delete, delete_work_func);

        mapping = gfs2_glock2aspace(gl);
        if (mapping) {
                mapping->a_ops = &gfs2_meta_aops;
                mapping->host = s->s_bdev->bd_inode;
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_NOFS);
                mapping->private_data = NULL;
                mapping->writeback_index = 0;
        }

        tmp = find_insert_glock(&name, gl);
        if (!tmp) {
                *glp = gl;
                goto out;
        }
        if (IS_ERR(tmp)) {
                ret = PTR_ERR(tmp);
                goto out_free;
        }
        *glp = tmp;

out_free:
        kfree(gl->gl_lksb.sb_lvbptr);
        kmem_cache_free(cachep, gl);
        atomic_dec(&sdp->sd_glock_disposal);

out:
        return ret;
}

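/*
 * Usage sketch (illustrative, hypothetical caller): gfs2_glock_get()
 * only finds or creates the structure; taking the lock is a separate
 * step. A caller wanting an inode glock in shared mode might do:
 *
 *      struct gfs2_glock *gl;
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *      if (error)
 *              return error;
 *      error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *      gfs2_glock_put(gl);     (the holder keeps its own reference)
 *      if (error)
 *              return error;
 *      ...
 *      gfs2_glock_dq_uninit(&gh);
 *
 * This is the same pattern gfs2_glock_nq_num() below wraps up.
 */
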
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = _RET_IP_;
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = _RET_IP_;
        put_pid(gh->gh_owner_pid);
        gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gfs2_holder_mark_uninitialized(gh);
        gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
        unsigned long time1 = jiffies;

        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
        if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
                /* Lengthen the minimum hold time. */
                gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
                                              GL_GLOCK_HOLD_INCR,
                                              GL_GLOCK_MAX_HOLD);
        return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay, bool remote)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, remote);
        trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        if (seq) {
                seq_vprintf(seq, fmt, args);
        } else {
                vaf.fmt = fmt;
                vaf.va = &args;

                pr_err("%pV", &vaf);
        }

        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_futile = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_futile = !may_grant(gl, gh);
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_futile &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        set_bit(GLF_QUEUED, &gl->gl_flags);
        trace_gfs2_glock_queue(gh, 1);
        gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
        gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_lockref.lock);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
                spin_lock(&gl->gl_lockref.lock);
        }
        return;

trap_recursive:
        pr_err("original: %pSR\n", (void *)gh2->gh_ip);
        pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
        pr_err("lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        pr_err("new: %pSR\n", (void *)gh->gh_ip);
        pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
        pr_err("lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        gfs2_dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
                     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                gl->gl_lockref.count++;
                __gfs2_glock_queue_work(gl, 0);
        }
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

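/*
 * Usage sketch (illustrative): with GL_ASYNC, gfs2_glock_nq() only
 * queues the request, so a caller can overlap other work and pick up
 * the result later:
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);                     (never fails for GL_ASYNC)
 *      ... do other work ...
 *      if (gfs2_glock_poll(&gh))
 *              error = gfs2_glock_wait(&gh);   (0, GLR_TRYFAILED or errno)
 */
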
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_lockref.lock);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);

        list_del_init(&gh->gh_list);
        clear_bit(HIF_HOLDER, &gh->gh_iflags);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_lockref.lock);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_lockref.lock);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
                gfs2_glock_add_to_lru(gl);

        trace_gfs2_glock_queue(gh, 0);
        if (unlikely(!fast_path)) {
                gl->gl_lockref.count++;
                if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
                    gl->gl_name.ln_type == LM_TYPE_INODE)
                        delay = gl->gl_hold_time;
                __gfs2_glock_queue_work(gl, delay);
        }
        spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        gfs2_glock_dq(gh);
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array for the sorted holder pointers
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
                                    GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

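/*
 * Usage sketch (illustrative, hypothetical inodes ip1/ip2): because
 * nq_m_sync() sorts the holders by lock number, every caller acquires
 * the same set of glocks in the same, deadlock-free order:
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      ...
 *      gfs2_glock_dq_m(2, ghs);
 *      gfs2_holder_uninit(&ghs[0]);
 *      gfs2_holder_uninit(&ghs[1]);
 */
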
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        while (num_gh--)
                gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gfs2_glock_hold(gl);
        holdtime = gl->gl_tchange + gl->gl_hold_time;
        if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE) {
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                        delay = gl->gl_hold_time;
        }

        spin_lock(&gl->gl_lockref.lock);
        handle_callback(gl, state, delay, true);
        __gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
        const struct gfs2_holder *gh;

        if (gl->gl_reply & ~LM_OUT_ST_MASK)
                return 0;
        if (gl->gl_target == LM_ST_UNLOCKED)
                return 0;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (LM_FLAG_NOEXP & gh->gh_flags)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

        spin_lock(&gl->gl_lockref.lock);
        gl->gl_reply = ret;

        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                if (gfs2_should_freeze(gl)) {
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                        spin_unlock(&gl->gl_lockref.lock);
                        return;
                }
        }

        gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_glock *gla, *glb;

        gla = list_entry(a, struct gfs2_glock, gl_lru);
        glb = list_entry(b, struct gfs2_glock, gl_lru);

        if (gla->gl_name.ln_number > glb->gl_name.ln_number)
                return 1;
        if (gla->gl_name.ln_number < glb->gl_name.ln_number)
                return -1;

        return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */
static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
        struct gfs2_glock *gl;

        list_sort(NULL, list, glock_cmp);

        while (!list_empty(list)) {
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        set_bit(GLF_LRU, &gl->gl_flags);
                        atomic_inc(&lru_count);
                        continue;
                }
                if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        spin_unlock(&gl->gl_lockref.lock);
                        goto add_back_to_lru;
                }
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                __gfs2_glock_queue_work(gl, 0);
                spin_unlock(&gl->gl_lockref.lock);
                cond_resched_lock(&lru_lock);
        }
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */
static long gfs2_scan_glock_lru(int nr)
{
        struct gfs2_glock *gl;
        LIST_HEAD(skipped);
        LIST_HEAD(dispose);
        long freed = 0;

        spin_lock(&lru_lock);
        while ((nr-- >= 0) && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

                /* Test for being demotable */
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        clear_bit(GLF_LRU, &gl->gl_flags);
                        freed++;
                        continue;
                }

                list_move(&gl->gl_lru, &skipped);
        }
        list_splice(&skipped, &lru_list);
        if (!list_empty(&dispose))
                gfs2_dispose_glock_lru(&dispose);
        spin_unlock(&lru_lock);

        return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
        return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
        .seeks = DEFAULT_SEEKS,
        .count_objects = gfs2_glock_shrink_count,
        .scan_objects = gfs2_glock_shrink_scan,
};

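/*
 * Note (assumption, based on the rest of this file rather than the
 * excerpt shown here): glock_shrinker is registered with
 * register_shrinker() during glock subsystem init, so memory pressure
 * ends up driving gfs2_glock_shrink_scan() and thereby the LRU
 * disposal path above.
 */
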
/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
 * that.
 */
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct rhashtable_iter iter;

        rhashtable_walk_enter(&gl_hash_table, &iter);

        do {
                rhashtable_walk_start(&iter);

                while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                        if (gl->gl_name.ln_sbd == sdp &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);

                rhashtable_walk_stop(&iter);
        } while (cond_resched(), gl == ERR_PTR(-EAGAIN));

        rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */
static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
                gfs2_glock_put(gl);
                return;
        }
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
        glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_lockref.lock);
        gfs2_dump_glock(seq, gl);
        spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
        dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
        flush_workqueue(glock_workqueue);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event_timeout(sdp->sd_glock_wait,
                           atomic_read(&sdp->sd_glock_disposal) == 0,
                           HZ * 600);
        glock_hash_walk(dump_glock_func, sdp);
}

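/*
 * Resume an interrupted truncate on @ip, then clear GLF_LOCK and re-run
 * the glock state machine so that queued holders can make progress.
 */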
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);
}

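/*
 * Translate a lock state into the two-letter code used in the debugfs
 * "glocks" output (UN, SH, DF, EX).
 */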
static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

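/*
 * Encode a holder's request flags and iflags as a compact string of
 * single-character codes; @buf must have room for all of the flags plus
 * the terminating NUL (the callers pass a 32-byte buffer).
 */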
static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
        char *p = buf;

        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */
static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];

        rcu_read_lock();
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error,
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ? gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
        rcu_read_unlock();
}

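/*
 * Encode the glock's GLF_* flag bits as a string of single-character
 * codes for the "f:" field of the glock dump.
 */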
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        if (test_bit(GLF_QUEUED, gflags))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        *p = 0;
        return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces, for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */
void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned long long dtime;
        const struct gfs2_holder *gh;
        char gflags_buf[32];

        dtime = jiffies - gl->gl_demote_time;
        dtime *= 1000000/HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                dtime = 0;
        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
                       state2str(gl->gl_state),
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number,
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       (int)gl->gl_lockref.count, gl->gl_hold_time);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
                dump_holder(seq, gh);

        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
                glops->go_dump(seq, gl);
}

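/*
 * Emit one line of per-glock lock statistics (smoothed round-trip
 * times, inter-request times, and DLM/queue counts) for the "glstats"
 * debugfs file.
 */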
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock *gl = iter_ptr;

        seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
        return 0;
}

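/*
 * Labels for the "sbstats" debugfs file: gfs2_gltype names the rows
 * (one per glock type) and gfs2_stype names the per-type statistics.
 */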
static const char *gfs2_gltype[] = {
        "type",
        "reserved",
        "nondisk",
        "inode",
        "rgrp",
        "meta",
        "iopen",
        "flock",
        "plock",
        "quota",
        "journal",
};

static const char *gfs2_stype[] = {
        [GFS2_LKS_SRTT]     = "srtt",
        [GFS2_LKS_SRTTVAR]  = "srttvar",
        [GFS2_LKS_SRTTB]    = "srttb",
        [GFS2_LKS_SRTTVARB] = "srttvarb",
        [GFS2_LKS_SIRT]     = "sirt",
        [GFS2_LKS_SIRTVAR]  = "sirtvar",
        [GFS2_LKS_DCOUNT]   = "dlm",
        [GFS2_LKS_QCOUNT]   = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

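/*
 * Print one row of the sbstats table. The seq position encodes the
 * glock type in pos >> 3 and the statistic in pos & 7; row 0 prints
 * the CPU-number header instead of data.
 */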
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_sbd *sdp = seq->private;
        loff_t pos = *(loff_t *)iter_ptr;
        unsigned index = pos >> 3;
        unsigned subindex = pos & 0x07;
        int i;

        if (index == 0 && subindex != 0)
                return 0;

        seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
                   (index == 0) ? "cpu": gfs2_stype[subindex]);

        for_each_possible_cpu(i) {
                const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

                if (index == 0)
                        seq_printf(seq, " %15u", i);
                else
                        seq_printf(seq, " %15llu", (unsigned long long)lkstats->
                                   lkstats[index - 1].stats[subindex]);
        }
        seq_putc(seq, '\n');
        return 0;
}

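/*
 * Module init: set up the glock hash table, the glock and delete
 * workqueues, the shrinker, and the glock wait table, unwinding in
 * reverse order on failure.
 */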
int __init gfs2_glock_init(void)
{
        int i, ret;

        ret = rhashtable_init(&gl_hash_table, &ht_parms);
        if (ret < 0)
                return ret;

        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (!glock_workqueue) {
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }

        ret = register_shrinker(&glock_shrinker);
        if (ret) {
                destroy_workqueue(gfs2_delete_workqueue);
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return ret;
        }

        for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(glock_wait_table + i);

        return 0;
}

void gfs2_glock_exit(void)
{
        unregister_shrinker(&glock_shrinker);
        rhashtable_destroy(&gl_hash_table);
        destroy_workqueue(glock_workqueue);
        destroy_workqueue(gfs2_delete_workqueue);
}

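/*
 * Move the seq_file iterator ahead by @n glocks, dropping the reference
 * held on the current one (unless @n is zero). ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next() means the walk was interrupted by a table
 * resize; in that case simply take the next live glock instead of
 * counting further.
 */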
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
        struct gfs2_glock *gl = gi->gl;

        if (gl) {
                if (n == 0)
                        return;
                if (!lockref_put_not_zero(&gl->gl_lockref))
                        gfs2_glock_queue_put(gl);
        }
        for (;;) {
                gl = rhashtable_walk_next(&gi->hti);
                if (IS_ERR_OR_NULL(gl)) {
                        if (gl == ERR_PTR(-EAGAIN)) {
                                n = 1;
                                continue;
                        }
                        gl = NULL;
                        break;
                }
                if (gl->gl_name.ln_sbd != gi->sdp)
                        continue;
                if (n <= 1) {
                        if (!lockref_get_not_dead(&gl->gl_lockref))
                                continue;
                        break;
                } else {
                        if (__lockref_is_dead(&gl->gl_lockref))
                                continue;
                        n--;
                }
        }
        gi->gl = gl;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n;

        /*
         * We can either stay where we are, skip to the next hash table
         * entry, or start from the beginning.
         */
        if (*pos < gi->last_pos) {
                rhashtable_walk_exit(&gi->hti);
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
                n = *pos + 1;
        } else {
                n = *pos - gi->last_pos;
        }

        rhashtable_walk_start(&gi->hti);

        gfs2_glock_iter_next(gi, n);
        gi->last_pos = *pos;
        return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
                                 loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;

        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi, 1);
        return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
        __releases(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;

        rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
        dump_glock(seq, iter_ptr);
        return 0;
}

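/*
 * The sbstats file iterates over plain positions 0..GFS2_NR_SBSTATS-1
 * rather than glocks; preemption stays disabled for the duration of
 * the walk while the per-CPU counters are read.
 */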
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
        preempt_disable();
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        (*pos)++;
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
        .start = gfs2_sbstats_seq_start,
        .next  = gfs2_sbstats_seq_next,
        .stop  = gfs2_sbstats_seq_stop,
        .show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

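/*
 * Common open path for the "glocks" and "glstats" files: allocate the
 * iterator, try to install a large seq_file buffer up front (falling
 * back to the default on failure), and set up the rhashtable walk.
 */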
static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                              const struct seq_operations *ops)
{
        int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;

                gi->sdp = inode->i_private;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                /*
                 * Initially, we are "before" the first hash table entry;
                 * the first call to rhashtable_walk_next gets us the first
                 * entry.
                 */
                gi->last_pos = -1;
                gi->gl = NULL;
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct gfs2_glock_iter *gi = seq->private;

        if (gi->gl)
                gfs2_glock_put(gi->gl);
        rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &gfs2_sbstats_seq_ops);
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                seq->private = inode->i_private; /* sdp */
        }
        return ret;
}

static const struct file_operations gfs2_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_sbstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

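/*
 * Create the per-filesystem debugfs directory and its "glocks",
 * "glstats" and "sbstats" files; on any failure, tear down whatever
 * was created so far.
 */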
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        struct dentry *dent;

        dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dir = dent;

        dent = debugfs_create_file("glocks",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_glocks_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_glocks = dent;

        dent = debugfs_create_file("glstats",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_glstats_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_glstats = dent;

        dent = debugfs_create_file("sbstats",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_sbstats_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_sbstats = dent;

        return 0;
fail:
        gfs2_delete_debugfs_file(sdp);
        return dent ? PTR_ERR(dent) : -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        if (sdp->debugfs_dir) {
                if (sdp->debugfs_dentry_glocks) {
                        debugfs_remove(sdp->debugfs_dentry_glocks);
                        sdp->debugfs_dentry_glocks = NULL;
                }
                if (sdp->debugfs_dentry_glstats) {
                        debugfs_remove(sdp->debugfs_dentry_glstats);
                        sdp->debugfs_dentry_glstats = NULL;
                }
                if (sdp->debugfs_dentry_sbstats) {
                        debugfs_remove(sdp->debugfs_dentry_sbstats);
                        sdp->debugfs_dentry_sbstats = NULL;
                }
                debugfs_remove(sdp->debugfs_dir);
                sdp->debugfs_dir = NULL;
        }
}

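/*
 * Create and remove the top-level "gfs2" debugfs directory at module
 * load and unload.
 */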
int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        if (IS_ERR(gfs2_root))
                return PTR_ERR(gfs2_root);
        return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}