/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;

void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;
	if (!end_io)
		f2fs_flush_merged_bios(sbi);
}
/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}
/*
 * We guarantee no failure on the returned page.
 */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
	};

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	if (f2fs_submit_page_bio(&fio)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	/*
	 * If there is any IO error when accessing the device, make the
	 * filesystem read-only and make sure we do not write a checkpoint
	 * with a non-uptodate meta page.
	 */
	if (unlikely(!PageUptodate(page)))
		f2fs_stop_checkpoint(sbi, false);
out:
	return page;
}

struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

/* for POR only */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}
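/*
 * Note: get_tmp_page() differs from get_meta_page() only in that the read
 * is issued without REQ_META (see __get_meta_page() above). Pages fetched
 * this way during power-off recovery (POR) sit outside the regular
 * CP/NAT/SIT/SSA areas, so the block layer treats them as ordinary reads.
 */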
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}
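/*
 * The range checks above reflect the usual f2fs on-disk layout, in order:
 *
 *	SB | Checkpoint | SIT | NAT | SSA | Main area
 *
 * META_CP must fall between __start_cp_addr() and the SIT base, META_SSA
 * between ssa_blkaddr and the main area, and META_POR anywhere inside the
 * main area. META_NAT is not range-checked here; callers wrap NAT block
 * numbers themselves (see ra_meta_pages() below).
 */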
/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) :
						REQ_RAHEAD,
		.encrypted_page = NULL,
	};
	struct blk_plug plug;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {
		if (!is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		fio.old_blkaddr = fio.new_blkaddr;
		f2fs_submit_page_mbio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	blk_finish_plug(&plug);
	return blkno - start;
}
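/*
 * Example (hypothetical caller, not taken from this file): to warm the
 * cache with every SIT block before a checkpoint, one could issue
 *
 *	ra_meta_pages(sbi, 0, SIT_BLK_CNT(sbi), META_SIT, true);
 *
 * The return value is the number of block addresses actually walked, which
 * may be less than nrpages when a block fails is_valid_blkaddr().
 */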
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
}
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, META, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff, written;

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);

	/* hold cp_mutex so meta writeback cannot race with a checkpoint */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	blk_start_plug(&plug);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	blk_finish_plug(&plug);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec, 0);

	blk_start_plug(&plug);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (mapping->a_ops->writepage(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	blk_finish_plug(&plug);

	return nwritten;
}
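/*
 * Note the early exit above: when a caller passes a finite nr_to_write,
 * the loop bails out as soon as the dirty pages stop being contiguous in
 * page index (page->index != prev + 1). Background writeback therefore
 * flushes only a physically sequential run of meta pages, while LONG_MAX
 * callers such as the checkpoint path sweep everything.
 */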
static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	/* allocate outside the lock; free below if the entry already exists */
	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (radix_tree_insert(&im->ino_root, ino, e)) {
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}
	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, type);
}

void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}
/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}
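/*
 * Presumably how this is used (judging from the APPEND_INO/UPDATE_INO
 * modes above): the fsync path can ask whether an inode's data was already
 * written back since the last checkpoint and skip redundant work when
 * exist_written_data() returns true. The lookup itself is just a locked
 * radix-tree probe.
 */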
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		return -ENOSPC;
	}
#endif
	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
	update_inode_page(inode);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;

	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * It is a bug if the inode referenced by an orphan entry
		 * cannot be found.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
	return 0;
}

int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return 0;

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	return 0;
}
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * We don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write it in the journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * when an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
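/*
 * On-disk shape written above, per struct f2fs_orphan_block: an array of
 * up to F2FS_ORPHANS_PER_BLOCK (1020) little-endian inode numbers, plus
 * blk_addr (1-based index of this block within the orphan run), blk_count
 * (total orphan blocks in this checkpoint pack) and entry_count fields.
 * recover_orphan_inodes() above walks the same layout at mount time.
 */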
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}

invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}
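/*
 * A CP pack is bracketed by two copies of the checkpoint block: the header
 * at cp_addr and the footer cp_pack_total_block_count - 1 blocks later.
 * The pack is accepted only when both copies pass their CRC and carry the
 * same checkpoint_ver, which is the evidence that the whole pack reached
 * the disk before any crash.
 */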
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	/* Sanity checking of checkpoint */
	if (sanity_check_ckpt(sbi))
		goto fail_no_cp;

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}
static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	SetPagePrivate(page);
	f2fs_trace_pid(page);
}

void remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}
int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit the bio, since several dentry pages may
		 * still be under writeback in the inode being freed.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		cond_resched();
	}
	goto retry;
}
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_entry(head->next, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}
/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush.
	 */
retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		err = sync_node_pages(sbi, &wbc);
		if (err) {
			f2fs_unlock_all(sbi);
			goto out;
		}
		goto retry_flush_nodes;
	}
out:
	blk_finish_plug(&plug);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);

	build_free_nids(sbi);
	f2fs_unlock_all(sbi);
}
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&sbi->nr_wb_bios))
			break;

		io_schedule_timeout(5*HZ);
	}
	finish_wait(&sbi->cp_wait, &wait);
}
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	nid_t last_nid = nm_i->next_scan_nid;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
	bool invalidate = false;
	struct super_block *sb = sbi->sb;
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;

	/*
	 * This avoids incorrect roll-forward operations. It also uses meta
	 * pages, so it should be called prior to sync_meta_pages() below.
	 */
	if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
		invalidate = true;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
		sync_meta_pages(sbi, META, LONG_MAX);
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;
	}

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi, false);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	if (cpc->reason == CP_UMOUNT)
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason == CP_FASTBOOT)
		set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* need to wait for end_io results */
	wait_on_all_pages_writeback(sbi);
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	/* write out checkpoint buffer at block 0 */
	update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	if (sb->s_bdev->bd_part)
		kbytes_written += BD_PART_WRITTEN(sbi);

	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* writeout checkpoint block */
	update_meta_page(sbi, ckpt, start_blk);

	/* wait for previous submitted node/meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we only have one bio having CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	/* wait for previous submitted meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	/*
	 * invalidate meta page which is used temporarily for zeroing out
	 * block at the end of warm node chain.
	 */
	if (invalidate)
		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
								discard_blk);

	release_ino_entry(sbi, false);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	clear_prefree_segments(sbi, cpc);
	clear_sbi_flag(sbi, SBI_IS_DIRTY);

	return 0;
}
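/*
 * Resulting CP pack layout, as laid out block by block above (start_blk
 * advances through the pack):
 *
 *	+--------------------+  <- __start_cp_addr()
 *	| checkpoint block   |  header copy of *ckpt
 *	| cp payload blocks  |  __cp_payload(sbi) blocks
 *	| orphan blocks      |  GET_ORPHAN_BLOCKS(orphan_num), if any
 *	| data summaries     |  data_sum_blocks (compact or normal form)
 *	| node summaries     |  NR_CURSEG_NODE_TYPE blocks, only when
 *	|                    |  __remain_node_summaries(cpc->reason)
 *	| checkpoint block   |  footer copy of *ckpt, same version as header
 *	+--------------------+
 *
 * validate_checkpoint() relies on the identical header/footer copies to
 * decide whether the pack is complete.
 */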
/*
 * We guarantee that this checkpoint procedure will not fail.
 */
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
		(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}
	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_bios(sbi);

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi, cpc);

	/* unlock all the fs_lock[] in do_checkpoint() */
	err = do_checkpoint(sbi, cpc);

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason == CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);

	/* do checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	mutex_unlock(&sbi->cp_mutex);
	return err;
}
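/*
 * Sequence of a full checkpoint, as driven above: take cp_mutex, freeze
 * FS operations (block_operations), flush merged bios, bump the version
 * number, flush cached NAT/SIT entries to their on-disk areas, and then
 * do_checkpoint() writes and flushes the CP pack itself before operations
 * are unblocked.
 */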
void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}
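/*
 * Worked example for max_orphans, assuming typical values (hypothetical,
 * not read from any particular superblock): with 512 blocks per segment,
 * F2FS_CP_PACKS = 2, NR_CURSEG_TYPE = 6 and no extra cp payload, the
 * checkpoint segment can spare 512 - 2 - 6 - 0 = 504 blocks for orphan
 * lists, i.e. 504 * 1020 = 514080 orphan inodes at most.
 */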
int __init create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}