/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}
		submit_read_page(sbi, page, index, READ_SYNC | REQ_META);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_read_bio(sbi, READ_SYNC | REQ_META);
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * the previous nat entry can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
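
/*
 * Worked example for get_node_path() (an illustrative sketch; the numbers
 * assume the common 4KB-block geometry, i.e. ADDRS_PER_INODE(fi) == 923 and
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *	block == 2000: 2000 - 923 = 1077 skips the inode's direct pointers,
 *	and 1077 - 1018 = 59 skips NODE_DIR1_BLOCK, so the result is
 *	offset[0] = NODE_DIR2_BLOCK, offset[1] = 59, level = 1.
 */
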
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
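
/*
 * A minimal sketch of the usual caller pattern for get_dnode_of_data()
 * (illustrative only; 'inode', 'index' and the error handling around the
 * call are assumptions of this example, as in the data-path callers):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	// dn.data_blkaddr now holds the data block address
 *	f2fs_put_dnode(&dn);	// caller must put the dnode
 */
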
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *node_mapping = sbi->node_inode->i_mapping;
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = F2FS_NODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;
		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;
		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;
		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (page->mapping != node_mapping) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}

	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (!inc_valid_node_count(sbi, dn->inode)) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (ofs == XATTR_NODE_OFFSET)
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should do after getting the following values.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int nr_pages;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			if (ino && ino_of_node(page) == ino) {
				wait_on_page_writeback(page);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret2 = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	if (sbi->por_doing)
		goto redirty_out;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB) * 3 node types is a reasonable amount.
 */
#define COLLECT_DIRTY_NODES	1536
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long nr_to_write = wbc->nr_to_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
						wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
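
/*
 * A minimal sketch of how these aops are wired up (illustrative; in this
 * tree the node inode's mapping is initialized when the inode is grabbed
 * at mount time, roughly as f2fs_iget() does for F2FS_NODE_INO(sbi)):
 *
 *	inode->i_mapping->a_ops = &f2fs_node_aops;
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
 */
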
static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return -1;

	/* 0 nid should not be used */
	if (nid == 0)
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (nid >= nm_i->max_nid)
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !sbi->on_build_free_nids) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	sbi->on_build_free_nids = true;
	build_free_nids(sbi);
	sbi->on_build_free_nids = false;
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
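
/*
 * A minimal sketch of the expected alloc_nid() life cycle (illustrative
 * only; 'sbi' and the surrounding error handling are assumptions of this
 * example, mirroring the usage in get_dnode_of_data() above):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	// ... create the node or inode that will use nid ...
 *	alloc_nid_done(sbi, nid);	// on success: drop nid from the free list
 *	// or, if creating the node failed:
 *	alloc_nid_failed(sbi, nid);	// return nid to the free list
 */
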
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_NODE(page);
	dst = F2FS_NODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (!inc_valid_node_count(sbi, NULL))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading the node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read next node page,
		 * we must clear PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = F2FS_NODE(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the current summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and lock
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
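
/*
 * Sizing sketch for init_node_manager() (illustrative; assumes 4KB blocks,
 * 512 blocks per segment and NAT_ENTRY_PER_BLOCK == 455): with
 * segment_count_nat == 2, one segment of each NAT pair is addressable,
 * so nat_blocks == 1 << 9 == 512 and max_nid == 455 * 512 == 232960.
 */
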
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}