glops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	inode = &ip->i_inode;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}
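
/*
 * Editorial note (not in the original source): gfs2_pte_inval() tears down
 * user mappings so the next access faults in again under whatever state the
 * glock ends up in.  GIF_SW_PAGED means pages may have been dirtied through
 * a writable mapping, so the glock is marked GLF_DIRTY to force those pages
 * out before the lock is released.
 */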

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	inode = &ip->i_inode;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	truncate_inode_pages(inode->i_mapping, 0);
	gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
	clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
 * @gl: the glock
 * @flags: DIO_START | DIO_WAIT
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

static void gfs2_page_sync(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip;
	struct inode *inode;
	struct address_space *mapping;
	int error = 0;

	ip = gl->gl_object;
	inode = &ip->i_inode;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	mapping = inode->i_mapping;

	if (flags & DIO_START)
		filemap_fdatawrite(mapping);
	if (!error && (flags & DIO_WAIT))
		error = filemap_fdatawait(mapping);

	/* Put back any errors cleared by filemap_fdatawait()
	   so they can be caught by someone who can pass them
	   up to user space. */

	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else if (error)
		set_bit(AS_EIO, &mapping->flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 * @flags: DIO_*
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}

	clear_bit(GLF_SYNC, &gl->gl_flags);
}
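
/*
 * Editorial note (not in the original source): the ordering above follows
 * write-ahead logging.  The journal is flushed before the in-place metadata
 * buffers are written, and when the glock is being released (DIO_RELEASE)
 * its AIL entries are drained as well, so another node can be granted the
 * lock without seeing metadata that is still only described in our log.
 */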

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * meta_go_demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if we have no cached data; ok to demote meta glock
 */

static int meta_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, DIO_START,
				       &bh);
		if (!error)
			brelse(bh);
	}
}
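
/*
 * Editorial note (not in the original source): the read above starts I/O on
 * the inode's disk block (gl_name.ln_number) as soon as the lock transition
 * completes, so the block is likely cached by the time a holder refreshes
 * the inode.  Holders that passed GL_SKIP opt out of this read-ahead.
 */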

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock has gone
 * unused for too long and is being purged from our node's glock cache;
 * either way, we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags:
 *
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (meta && data) {
			gfs2_page_sync(gl, flags | DIO_START);
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
			gfs2_page_sync(gl, flags | DIO_WAIT);
			clear_bit(GLF_DIRTY, &gl->gl_flags);
		} else if (meta) {
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
		} else if (data)
			gfs2_page_sync(gl, flags | DIO_START | DIO_WAIT);

		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}

	clear_bit(GLF_SYNC, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (meta) {
		gfs2_meta_inval(gl);
		gl->gl_vn++;
	}
	if (data)
		gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}
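
/*
 * Editorial note (not in the original source): an inode glock may be handed
 * back either because it is completely idle (no inode attached and no cached
 * pages) or, when the filesystem is not mounted with localcaching, because
 * it has not been touched for gt_demote_secs (a tunable; a value of 300
 * would mean five minutes of inactivity).
 */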

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder acquiring the glock
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (ip->i_vn != gl->gl_vn) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
		gfs2_inode_attr_in(ip);
	}

	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_flags & GL_LOCAL_EXCL))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gh: the holder releasing the glock
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && test_bit(GLF_DIRTY, &gl->gl_flags))
		gfs2_inode_attr_in(ip);

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * inode_greedy - adjust an inode's "greedy" glock hold time
 * @gl: the glock
 *
 */

static void inode_greedy(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
	unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
	unsigned int new_time;

	spin_lock(&ip->i_spin);

	if (time_after(ip->i_last_pfault + quantum, jiffies)) {
		new_time = ip->i_greedy + quantum;
		if (new_time > max)
			new_time = max;
	} else {
		new_time = ip->i_greedy - quantum;
		if (!new_time || new_time > max)
			new_time = 1;
	}

	ip->i_greedy = new_time;

	spin_unlock(&ip->i_spin);

	iput(&ip->i_inode);
}
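
/*
 * Editorial illustration (not in the original source): with quantum Q and
 * cap M, every call that follows a page fault within Q jiffies of the
 * previous one grows i_greedy by Q, up to M; a quieter inode decays by Q
 * per call, bottoming out at one jiffy.  The page-fault paths use i_greedy
 * as the length of time this node tries to hang on to the glock before
 * answering a remote request for it.
 */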

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head-of-log bookkeeping */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = meta_go_demote_ok,
	.go_type = LM_TYPE_META
};

struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE
};

struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP
};

struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK
};

struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_callback = gfs2_iopen_go_callback,
	.go_type = LM_TYPE_IOPEN
};

struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK
};

struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK
};

struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA
};

struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL
};
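
/*
 * Editorial sketch (not in the original source): these tables are what tie
 * the glock core to each lock type.  Callers pass one of them when creating
 * or acquiring a glock, and the core then dispatches through it:
 * go_xmote_th/go_drop_th around lock-manager state changes, go_sync/go_inval
 * when cached data must be written back or discarded, and go_lock/go_unlock
 * around the first and last local holder.  Roughly, acquiring the
 * transaction glock at mount time looks something like this (exact helper
 * name and flags are from memory and may differ in this tree):
 *
 *	error = gfs2_glock_nq_num(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops,
 *				  LM_ST_SHARED, GL_EXACT, &sdp->sd_trans_gh);
 */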