lops.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to in-place disk block, remove it from the AIL.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr)
                list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}
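
/*
 * maybe_release_space - resync the rgrp bitmap clone once a bitmap buffer
 * has made it into the log
 * @bd: The bufdata of the rgrp bitmap buffer being unpinned
 *
 * The clone bitmap preserves the bitmap state as of the previous log flush
 * so that blocks freed in a not-yet-committed transaction are not handed
 * out again before the free is on stable storage. When the bitmap buffer is
 * unpinned, the committed bitmap is copied back into the clone (sending
 * discards for the newly freed blocks if the "discard" mount option is set)
 * and the cloned free-block count is brought back in line with rd_free.
 */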
static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_rgrpd *rgd = gl->gl_object;
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == 0)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
        rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction whose AIL the buffer is placed on
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_tr = tr;
        list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}
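
/*
 * gfs2_log_incr_head - advance the log flush head by one block
 *
 * The flush head wraps back to the start of the journal when it reaches
 * jd_blocks. It must never catch up with the log tail unless it is still
 * equal to the log head (i.e. nothing has been written yet in this flush),
 * which is what the BUG_ON below checks.
 */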
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}
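
/*
 * gfs2_log_bmap - map the current log flush head to a device block
 *
 * The journal is described by a list of extents (struct gfs2_journal_extent),
 * each mapping a run of logical journal blocks onto contiguous device blocks.
 * This finds the extent containing sd_log_flush_head, advances the flush
 * head, and returns the corresponding device block number, or -1 (as a u64)
 * if the logical block is not covered by any extent.
 */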
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
        unsigned int lbn = sdp->sd_log_flush_head;
        struct gfs2_journal_extent *je;
        u64 block;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
                if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
                        block = je->dblock + lbn - je->lblock;
                        gfs2_log_incr_head(sdp);
                        return block;
                }
        }

        return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
                                  int error)
{
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
        unsigned size;

        bh = page_buffers(page);
        size = bvec->bv_len;
        while (bh_offset(bh) < bvec->bv_offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        set_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */
static void gfs2_end_log_write(struct bio *bio, int error)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct page *page;
        int i;

        if (error) {
                sdp->sd_log_error = error;
                fs_err(sdp, "Error %d writing to log\n", error);
        }

        bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
                submit_bio(rw, sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct super_block *sb = sdp->sd_vfs;
        unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
        struct bio *bio;

        BUG_ON(sdp->sd_log_bio);

        while (1) {
                bio = bio_alloc(GFP_NOIO, nrvecs);
                if (likely(bio))
                        break;
                nrvecs = max(nrvecs/2, 1U);
        }

        bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;

        sdp->sd_log_bio = bio;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct bio *bio = sdp->sd_log_bio;
        u64 nblk;

        if (bio) {
                nblk = bio_end_sector(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
                gfs2_log_flush_bio(sdp, WRITE);
        }

        return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
                           unsigned size, unsigned offset)
{
        u64 blkno = gfs2_log_bmap(sdp);
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                gfs2_log_flush_bio(sdp, WRITE);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}
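
/*
 * gfs2_get_log_desc - allocate and initialise a log descriptor block
 *
 * A log descriptor announces the batch of log blocks that follows it:
 * ld_type says what kind of blocks they are (metadata, jdata or revokes),
 * ld_length is the number of log blocks covered including the descriptor
 * itself, and ld_data1 is a type-specific count (e.g. the number of buffers
 * or revokes). The page comes from the gfs2_page_pool mempool, so ownership
 * passes to the log writing code once it is submitted.
 */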
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}
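
/*
 * gfs2_check_magic - flag a journaled data buffer that needs escaping
 *
 * Journaled data blocks that happen to begin with GFS2_MAGIC could be
 * mistaken for metadata when the journal is scanned during recovery. Such
 * buffers are marked "escaped": the copy written to the log has its first
 * word zeroed, an escape flag is recorded next to the block number in the
 * log descriptor, and databuf_lo_scan_elements() restores the magic number
 * when the block is replayed.
 */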
static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_bufdata *bda, *bdb;

        bda = list_entry(a, struct gfs2_bufdata, bd_list);
        bdb = list_entry(b, struct gfs2_bufdata, bd_list);

        if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
                return -1;
        if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
                return 1;
        return 0;
}
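
/*
 * gfs2_before_commit - write pinned metadata/jdata buffers to the log
 *
 * The pinned buffers on @blist are sorted by block number and written out
 * in batches of at most @limit per log descriptor. For each batch, a
 * descriptor block listing the in-place block numbers (plus an escape flag
 * per entry for journaled data) is written first, followed by the buffer
 * contents themselves. Escaped buffers are copied into a mempool page with
 * their leading magic number zeroed before being written.
 */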
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        list_sort(NULL, blist, blocknr_cmp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while(total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);

                        if (buffer_escaped(bd2->bd_bh)) {
                                void *kaddr;
                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                ptr = page_address(page);
                                kaddr = kmap_atomic(bd2->bd_bh->b_page);
                                memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
                                       bd2->bd_bh->b_size);
                                kunmap_atomic(kaddr);
                                *(__be32 *)ptr = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
                           &sdp->sd_log_le_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        if (tr == NULL) {
                gfs2_assert(sdp, list_empty(head));
                return;
        }

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */
static void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error;

        if (mapping == NULL)
                mapping = &sdp->sd_aspace;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
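
/*
 * revoke_lo_before_commit - write the pending revokes to the log
 *
 * A revoke records that an earlier journal copy of a block must not be
 * replayed, because the block has since been freed or reused. The revokes
 * are packed as 64-bit block numbers: the first log block begins with a
 * GFS2_LOG_DESC_REVOKE descriptor, and any continuation blocks begin with
 * a plain GFS2_METATYPE_LB meta header.
 */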
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        gfs2_write_revokes(sdp);
        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        }
                        else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp) / 2;

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
                           &sdp->sd_log_le_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        if (tr == NULL) {
                gfs2_assert(sdp, list_empty(head));
                return;
        }

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};
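
/*
 * The table below is NULL-terminated and is walked by the lops_*() helpers
 * (see lops.h), which call each non-NULL hook in turn. A minimal sketch of
 * that pattern, assuming only the NULL terminator below:
 *
 *        static inline void lops_before_commit(struct gfs2_sbd *sdp)
 *        {
 *                int x;
 *                for (x = 0; gfs2_log_ops[x]; x++)
 *                        if (gfs2_log_ops[x]->lo_before_commit)
 *                                gfs2_log_ops[x]->lo_before_commit(sdp);
 *        }
 */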
const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_revoke_lops,
        NULL,
};