/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;

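/*
 * Mark the checkpoint as erroneous and force the filesystem read-only;
 * unless called from end_io context, also flush the merged bios first.
 */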
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
        set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
        sbi->sb->s_flags |= MS_RDONLY;
        if (!end_io)
                f2fs_flush_merged_bios(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page = NULL;
repeat:
        page = f2fs_grab_cache_page(mapping, index, false);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        f2fs_wait_on_page_writeback(page, META, true);
        SetPageUptodate(page);
        return page;
}

/*
 * We guarantee no failure on the returned page.
 */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
                                                        bool is_meta)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO,
                .old_blkaddr = index,
                .new_blkaddr = index,
                .encrypted_page = NULL,
        };

        if (unlikely(!is_meta))
                fio.rw &= ~REQ_META;
repeat:
        page = f2fs_grab_cache_page(mapping, index, false);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (PageUptodate(page))
                goto out;

        fio.page = page;

        if (f2fs_submit_page_bio(&fio)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }

        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }

        /*
         * If there is any IO error when accessing the device, make our
         * filesystem read-only and make sure we do not write a checkpoint
         * with a non-uptodate meta page.
         */
        if (unlikely(!PageUptodate(page)))
                f2fs_stop_checkpoint(sbi, false);
out:
        return page;
}

struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        return __get_meta_page(sbi, index, true);
}

/* for POR (power-off recovery) only */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        return __get_meta_page(sbi, index, false);
}

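/*
 * Check that a block address of the given meta type lies inside the
 * valid on-disk range for that area.
 */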
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
        switch (type) {
        case META_NAT:
                break;
        case META_SIT:
                if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
                        return false;
                break;
        case META_SSA:
                if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
                        blkaddr < SM_I(sbi)->ssa_blkaddr))
                        return false;
                break;
        case META_CP:
                if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
                        blkaddr < __start_cp_addr(sbi)))
                        return false;
                break;
        case META_POR:
                if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
                        blkaddr < MAIN_BLKADDR(sbi)))
                        return false;
                break;
        default:
                BUG();
        }

        return true;
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
                                                        int type, bool sync)
{
        struct page *page;
        block_t blkno = start;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
                .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
                .encrypted_page = NULL,
        };
        struct blk_plug plug;

        if (unlikely(type == META_POR))
                fio.rw &= ~REQ_META;

        blk_start_plug(&plug);
        for (; nrpages-- > 0; blkno++) {

                if (!is_valid_blkaddr(sbi, blkno, type))
                        goto out;

                switch (type) {
                case META_NAT:
                        if (unlikely(blkno >=
                                        NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
                                blkno = 0;
                        /* get nat block addr */
                        fio.new_blkaddr = current_nat_addr(sbi,
                                        blkno * NAT_ENTRY_PER_BLOCK);
                        break;
                case META_SIT:
                        /* get sit block addr */
                        fio.new_blkaddr = current_sit_addr(sbi,
                                        blkno * SIT_ENTRY_PER_BLOCK);
                        break;
                case META_SSA:
                case META_CP:
                case META_POR:
                        fio.new_blkaddr = blkno;
                        break;
                default:
                        BUG();
                }

                page = f2fs_grab_cache_page(META_MAPPING(sbi),
                                                fio.new_blkaddr, false);
                if (!page)
                        continue;
                if (PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        continue;
                }

                fio.page = page;
                fio.old_blkaddr = fio.new_blkaddr;
                f2fs_submit_page_mbio(&fio);
                f2fs_put_page(page, 0);
        }
out:
        f2fs_submit_merged_bio(sbi, META, READ);
        blk_finish_plug(&plug);
        return blkno - start;
}

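/*
 * Conditionally readahead a window of recovery pages: only when the page
 * at @index is not already cached and uptodate.
 */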
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct page *page;
        bool readahead = false;

        page = find_get_page(META_MAPPING(sbi), index);
        if (!page || !PageUptodate(page))
                readahead = true;
        f2fs_put_page(page, 0);

        if (readahead)
                ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
}

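/*
 * writepage callback for meta pages: the page is redirtied while recovery
 * is in progress or a checkpoint error is set; otherwise it is written
 * out and the dirty meta page count is dropped.
 */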
static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);

        trace_f2fs_writepage(page, META);

        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
        if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
                goto redirty_out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;

        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);

        if (wbc->for_reclaim)
                f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);

        unlock_page(page);

        if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, META, WRITE);

        return 0;

redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;

        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;

        trace_f2fs_writepages(mapping->host, wbc, META);

        /* if mounting has failed, skip writing meta pages */
        mutex_lock(&sbi->cp_mutex);
        diff = nr_pages_to_write(sbi, META, wbc);
        written = sync_meta_pages(sbi, META, wbc->nr_to_write);
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
        return 0;

skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
        trace_f2fs_writepages(mapping->host, wbc, META);
        return 0;
}

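/*
 * Write back up to @nr_to_write dirty meta pages and return the number
 * of pages actually submitted.
 */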
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                                long nr_to_write)
{
        struct address_space *mapping = META_MAPPING(sbi);
        pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };
        struct blk_plug plug;

        pagevec_init(&pvec, 0);

        blk_start_plug(&plug);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
                if (unlikely(nr_pages == 0))
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        if (prev == ULONG_MAX)
                                prev = page->index - 1;
                        if (nr_to_write != LONG_MAX && page->index != prev + 1) {
                                pagevec_release(&pvec);
                                goto stop;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }
                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        f2fs_wait_on_page_writeback(page, META, true);

                        BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        if (mapping->a_ops->writepage(page, &wbc)) {
                                unlock_page(page);
                                break;
                        }
                        nwritten++;
                        prev = page->index;
                        if (unlikely(nwritten >= nr_to_write))
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }
stop:
        if (nwritten)
                f2fs_submit_merged_bio(sbi, type, WRITE);

        blk_finish_plug(&plug);

        return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
        trace_f2fs_set_page_dirty(page, META);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
                SetPagePrivate(page);
                f2fs_trace_pid(page);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
};

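/*
 * Insert @ino into the per-type radix tree and list; the preallocated
 * entry is freed again if the ino is already tracked.
 */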
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct inode_management *im = &sbi->im[type];
        struct ino_entry *e, *tmp;

        tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
        radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        if (!e) {
                e = tmp;
                if (radix_tree_insert(&im->ino_root, ino, e)) {
                        spin_unlock(&im->ino_lock);
                        radix_tree_preload_end();
                        goto retry;
                }
                memset(e, 0, sizeof(struct ino_entry));
                e->ino = ino;

                list_add_tail(&e->list, &im->ino_list);
                if (type != ORPHAN_INO)
                        im->ino_num++;
        }
        spin_unlock(&im->ino_lock);
        radix_tree_preload_end();

        if (e != tmp)
                kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        struct inode_management *im = &sbi->im[type];
        struct ino_entry *e;

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        if (e) {
                list_del(&e->list);
                radix_tree_delete(&im->ino_root, ino);
                im->ino_num--;
                spin_unlock(&im->ino_lock);
                kmem_cache_free(ino_entry_slab, e);
                return;
        }
        spin_unlock(&im->ino_lock);
}

void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* add new dirty ino entry into list */
        __add_ino_entry(sbi, ino, type);
}

void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
        /* remove dirty ino entry from list */
        __remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
        struct inode_management *im = &sbi->im[mode];
        struct ino_entry *e;

        spin_lock(&im->ino_lock);
        e = radix_tree_lookup(&im->ino_root, ino);
        spin_unlock(&im->ino_lock);
        return e ? true : false;
}

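/*
 * Release all cached ino entries; with @all set, the orphan list is
 * dropped as well.
 */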
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
        struct ino_entry *e, *tmp;
        int i;

        for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
                struct inode_management *im = &sbi->im[i];

                spin_lock(&im->ino_lock);
                list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
                        list_del(&e->list);
                        radix_tree_delete(&im->ino_root, e->ino);
                        kmem_cache_free(ino_entry_slab, e);
                        im->ino_num--;
                }
                spin_unlock(&im->ino_lock);
        }
}

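/*
 * Reserve room for one more orphan inode, failing with -ENOSPC once the
 * per-checkpoint orphan limit is hit.
 */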
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
        struct inode_management *im = &sbi->im[ORPHAN_INO];
        int err = 0;

        spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(FAULT_ORPHAN)) {
                spin_unlock(&im->ino_lock);
                return -ENOSPC;
        }
#endif
        if (unlikely(im->ino_num >= sbi->max_orphans))
                err = -ENOSPC;
        else
                im->ino_num++;
        spin_unlock(&im->ino_lock);

        return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
        struct inode_management *im = &sbi->im[ORPHAN_INO];

        spin_lock(&im->ino_lock);
        f2fs_bug_on(sbi, im->ino_num == 0);
        im->ino_num--;
        spin_unlock(&im->ino_lock);
}

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* add new orphan ino entry into list */
        __add_ino_entry(sbi, ino, ORPHAN_INO);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        /* remove orphan entry from orphan list */
        __remove_ino_entry(sbi, ino, ORPHAN_INO);
}

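/*
 * Re-read one recorded orphan inode, clear its link count, and let the
 * final iput() truncate and release it.
 */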
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode;

        inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode)) {
                /*
                 * it is a bug if we cannot find the inode that an
                 * orphan entry refers to
                 */
                f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
                return PTR_ERR(inode);
        }

        clear_nlink(inode);
        mark_inode_dirty_sync(inode);

        /* truncate all the data during iput */
        iput(inode);
        return 0;
}

int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blocks, i, j;
        int err;

        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;

        start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
        orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

        ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

        for (i = 0; i < orphan_blocks; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        err = recover_orphan_inode(sbi, ino);
                        if (err) {
                                f2fs_put_page(page, 1);
                                return err;
                        }
                }
                f2fs_put_page(page, 1);
        }
        /* clear Orphan Flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
        return 0;
}

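/*
 * Pack the in-memory orphan list into f2fs_orphan_block journal blocks
 * starting at @start_blk.
 */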
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head;
        struct f2fs_orphan_block *orphan_blk = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;
        struct page *page = NULL;
        struct ino_entry *orphan = NULL;
        struct inode_management *im = &sbi->im[ORPHAN_INO];

        orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

        /*
         * we don't need to do spin_lock(&im->ino_lock) here, since all the
         * orphan inode operations are covered under f2fs_lock_op().
         * And, spin_lock should be avoided due to page operations below.
         */
        head = &im->ino_list;

        /* loop over each orphan inode entry and write them into journal blocks */
        list_for_each_entry(orphan, head, list) {
                if (!page) {
                        page = grab_meta_page(sbi, start_blk++);
                        orphan_blk =
                                (struct f2fs_orphan_block *)page_address(page);
                        memset(orphan_blk, 0, sizeof(*orphan_blk));
                }

                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * when an orphan block is full of 1020 entries, we
                         * need to flush the current orphan block and bring
                         * another one in memory
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        nentries = 0;
                        page = NULL;
                }
        }

        if (page) {
                orphan_blk->blk_addr = cpu_to_le16(index);
                orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                orphan_blk->entry_count = cpu_to_le32(nentries);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }
}

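/*
 * Read the first and last blocks of one checkpoint pack and return the
 * first page only if both CRCs are valid and the two versions match.
 */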
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        size_t crc_offset;
        __u32 crc = 0;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = cur_cp_version(cp_block);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = cur_cp_version(cp_block);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}

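/*
 * Pick the newer of the two checkpoint packs and load it, together with
 * any payload blocks, into sbi->ckpt.
 */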
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;
        unsigned int cp_blks = 1 + __cp_payload(sbi);
        block_t cp_blk_no;
        int i;

        sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding a valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2)
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += ((unsigned long long)1) <<
                                le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        /* Sanity checking of checkpoint */
        if (sanity_check_ckpt(sbi))
                goto fail_no_cp;

        if (cp_blks <= 1)
                goto done;

        cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        if (cur_page == cp2)
                cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

        for (i = 1; i < cp_blks; i++) {
                void *sit_bitmap_ptr;
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;

                cur_page = get_meta_page(sbi, cp_blk_no + i);
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
        }
done:
        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

        if (is_inode_flag_set(inode, flag))
                return;

        set_inode_flag(inode, flag);
        list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
        stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
        int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

        if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
                return;

        list_del_init(&F2FS_I(inode)->dirty_list);
        clear_inode_flag(inode, flag);
        stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void update_dirty_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

        if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
                        !S_ISLNK(inode->i_mode))
                return;

        spin_lock(&sbi->inode_lock[type]);
        if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
                __add_dirty_inode(inode, type);
        inode_inc_dirty_pages(inode);
        spin_unlock(&sbi->inode_lock[type]);

        SetPagePrivate(page);
        f2fs_trace_pid(page);
}

void remove_dirty_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

        if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
                        !S_ISLNK(inode->i_mode))
                return;

        if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
                return;

        spin_lock(&sbi->inode_lock[type]);
        __remove_dirty_inode(inode, type);
        spin_unlock(&sbi->inode_lock[type]);
}

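/*
 * Repeatedly take the first inode off the dirty list of the given type
 * and write back its pages until the list drains.
 */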
int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
        struct list_head *head;
        struct inode *inode;
        struct f2fs_inode_info *fi;
        bool is_dir = (type == DIR_INODE);

        trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
                                get_pages(sbi, is_dir ?
                                F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
        if (unlikely(f2fs_cp_error(sbi)))
                return -EIO;

        spin_lock(&sbi->inode_lock[type]);

        head = &sbi->inode_list[type];
        if (list_empty(head)) {
                spin_unlock(&sbi->inode_lock[type]);
                trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
                                get_pages(sbi, is_dir ?
                                F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
                return 0;
        }
        fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
        inode = igrab(&fi->vfs_inode);
        spin_unlock(&sbi->inode_lock[type]);
        if (inode) {
                filemap_fdatawrite(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since several dentry pages in
                 * the inode being freed may still be under writeback.
                 */
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
                cond_resched();
        }
        goto retry;
}

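/*
 * Rewrite the inode pages of inodes whose in-core metadata is dirty,
 * bounded by the F2FS_DIRTY_IMETA count sampled at entry.
 */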
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &sbi->inode_list[DIRTY_META];
        struct inode *inode;
        struct f2fs_inode_info *fi;
        s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

        while (total--) {
                if (unlikely(f2fs_cp_error(sbi)))
                        return -EIO;

                spin_lock(&sbi->inode_lock[DIRTY_META]);
                if (list_empty(head)) {
                        spin_unlock(&sbi->inode_lock[DIRTY_META]);
                        return 0;
                }
                fi = list_entry(head->next, struct f2fs_inode_info,
                                                        gdirty_list);
                inode = igrab(&fi->vfs_inode);
                spin_unlock(&sbi->inode_lock[DIRTY_META]);
                if (inode) {
                        update_inode_page(inode);
                        iput(inode);
                }
        }
        return 0;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        struct blk_plug plug;
        int err = 0;

        blk_start_plug(&plug);

retry_flush_dents:
        f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                err = sync_dirty_inodes(sbi, DIR_INODE);
                if (err)
                        goto out;
                goto retry_flush_dents;
        }

        if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
                f2fs_unlock_all(sbi);
                err = f2fs_sync_inode_meta(sbi);
                if (err)
                        goto out;
                goto retry_flush_dents;
        }

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing nat/sit flush.
         */
retry_flush_nodes:
        down_write(&sbi->node_write);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
                err = sync_node_pages(sbi, &wbc);
                if (err) {
                        f2fs_unlock_all(sbi);
                        goto out;
                }
                goto retry_flush_nodes;
        }
out:
        blk_finish_plug(&plug);
        return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        up_write(&sbi->node_write);
        f2fs_unlock_all(sbi);
}

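/*
 * Sleep until every write bio issued so far has completed.
 */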
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&sbi->nr_wb_bios))
                        break;

                io_schedule_timeout(5*HZ);
        }
        finish_wait(&sbi->cp_wait, &wait);
}

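/*
 * Write one complete checkpoint pack: the cp block, payload blocks,
 * orphan blocks, and summaries, finishing with a synchronous flush of
 * the final cp block.
 */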
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
        nid_t last_nid = nm_i->next_scan_nid;
        block_t start_blk;
        unsigned int data_sum_blocks, orphan_blocks;
        __u32 crc32 = 0;
        int i;
        int cp_payload_blks = __cp_payload(sbi);
        block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
        bool invalidate = false;
        struct super_block *sb = sbi->sb;
        struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
        u64 kbytes_written;

        /*
         * This avoids conducting wrong roll-forward operations, and it uses
         * meta pages, so it should be called prior to sync_meta_pages below.
         */
        if (discard_next_dnode(sbi, discard_blk))
                invalidate = true;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
                if (unlikely(f2fs_cp_error(sbi)))
                        return -EIO;
        }

        next_free_nid(sbi, &last_nid);

        /*
         * modify checkpoint
         * version number is already updated
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi, false);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

        orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                        orphan_blocks);

        if (__remain_node_summaries(cpc->reason))
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks + NR_CURSEG_NODE_TYPE);
        else
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks);

        if (cpc->reason == CP_UMOUNT)
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

        if (cpc->reason == CP_FASTBOOT)
                set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

        if (orphan_num)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
                set_ckpt_flags(ckpt, CP_FSCK_FLAG);

        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
        *((__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* need to wait for end_io results */
        wait_on_all_pages_writeback(sbi);
        if (unlikely(f2fs_cp_error(sbi)))
                return -EIO;

        /* write out checkpoint buffer at block 0 */
        update_meta_page(sbi, ckpt, start_blk++);

        for (i = 1; i < 1 + cp_payload_blks; i++)
                update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
                                                        start_blk++);

        if (orphan_num) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;

        /* Record write statistics in the hot node summary */
        kbytes_written = sbi->kbytes_written;
        if (sb->s_bdev->bd_part)
                kbytes_written += BD_PART_WRITTEN(sbi);

        seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

        if (__remain_node_summaries(cpc->reason)) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* writeout checkpoint block */
        update_meta_page(sbi, ckpt, start_blk);

        /* wait for previous submitted node/meta pages writeback */
        wait_on_all_pages_writeback(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return -EIO;

        filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
        filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        percpu_counter_set(&sbi->alloc_valid_block_count, 0);

        /* Here, we only have one bio having CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        /* wait for previous submitted meta pages writeback */
        wait_on_all_pages_writeback(sbi);

        /*
         * invalidate the meta page which is used temporarily for zeroing out
         * the block at the end of the warm node chain.
         */
        if (invalidate)
                invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
                                                                discard_blk);

        release_ino_entry(sbi, false);

        if (unlikely(f2fs_cp_error(sbi)))
                return -EIO;

        clear_prefree_segments(sbi, cpc);
        clear_sbi_flag(sbi, SBI_IS_DIRTY);

        return 0;
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;
        int err = 0;

        mutex_lock(&sbi->cp_mutex);

        if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
                (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
                (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
                goto out;
        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto out;
        }
        if (f2fs_readonly(sbi->sb)) {
                err = -EROFS;
                goto out;
        }

        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

        err = block_operations(sbi);
        if (err)
                goto out;

        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

        f2fs_flush_merged_bios(sbi);

        /*
         * update checkpoint pack index:
         * increase the version number so that SIT entries and seg summaries
         * are written to the correct place
         */
        ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi, cpc);

        /* unlock all the fs_lock[] in do_checkpoint() */
        err = do_checkpoint(sbi, cpc);

        unblock_operations(sbi);
        stat_inc_cp_count(sbi->stat_info);

        if (cpc->reason == CP_RECOVERY)
                f2fs_msg(sbi->sb, KERN_NOTICE,
                        "checkpoint: version = %llx", ckpt_ver);

        /* do checkpoint periodically */
        f2fs_update_time(sbi, CP_TIME);
        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
        mutex_unlock(&sbi->cp_mutex);
        return err;
}

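/*
 * Initialize the per-type ino entry structures and compute how many
 * orphan inodes a single checkpoint pack can record.
 */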
void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
        int i;

        for (i = 0; i < MAX_INO_ENTRY; i++) {
                struct inode_management *im = &sbi->im[i];

                INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
                spin_lock_init(&im->ino_lock);
                INIT_LIST_HEAD(&im->ino_list);
                im->ino_num = 0;
        }

        sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
                        NR_CURSEG_TYPE - __cp_payload(sbi)) *
                                F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
        ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
                        sizeof(struct ino_entry));
        if (!ino_entry_slab)
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
                        sizeof(struct inode_entry));
        if (!inode_entry_slab) {
                kmem_cache_destroy(ino_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(ino_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}