/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nm_i)	mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* give 25%, 25%, 50% of memory to each component, respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}
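
/*
 * A rough numeric sketch of the thresholds above: mem_size is counted in
 * 4KB pages (the >> 12), and is compared against ram_thresh percent of
 * total RAM, quartered for FREE_NIDS/NAT_ENTRIES (>> 2) and halved for
 * DIRTY_DENTS (>> 1).  E.g. with 1GB of RAM (262144 pages) and
 * ram_thresh == 10, free nids and nat entries may each consume up to
 * 262144 * 10 / 100 / 4 = 6553 pages (~25MB) before callers start
 * shrinking them.
 */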
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
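
/*
 * Note that NAT blocks are kept in two on-disk copies: current_nat_addr()
 * points at the copy valid as of the last checkpoint, next_nat_addr() at
 * its shadow for the coming one.  get_next_nat_page() therefore copies the
 * live block into the shadow and flips the per-block bit recording which
 * copy is current (set_to_next_nat()).
 */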
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		e->fsync_done = false;
	write_unlock(&nm_i->nat_tree_lock);
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, its previous nat entry may
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}
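
/*
 * The f2fs_bug_on() checks above encode the legal state machine for a
 * node's block address, roughly:
 *
 *	NULL_ADDR -> NEW_ADDR		(node allocated but not yet written)
 *	NEW_ADDR  -> valid blkaddr	(node written out)
 *	valid     -> valid blkaddr	(node relocated by another write)
 *	NEW_ADDR or valid -> NULL_ADDR	(node freed; the version number is
 *					 bumped when a valid block is freed)
 */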
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
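
/*
 * The lookup order above goes cheapest-first: the in-memory nat cache,
 * then the NAT journal kept in the hot-data current segment summary,
 * and finally the on-disk NAT block itself; whatever is found is cached
 * for the next lookup.
 */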
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
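
/*
 * Worked example (assuming the common 4KB-block geometry, where
 * ADDRS_PER_INODE(fi) == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK ==
 * 1018): block 500 lives in the inode itself, so level == 0 and
 * offset[0] == 500; block 1000 falls 77 blocks past the inode, so
 * level == 1 with offset[0] == NODE_DIR1_BLOCK and offset[1] == 77;
 * block 4000 lands in the first indirect tree, so level == 2 with
 * offset[1] selecting the direct node and offset[2] the slot inside it.
 */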
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE; for the
 * LOOKUP_NODE modes, no such lock is needed.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
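
/*
 * Typical use (illustrative sketch only): look up the block address of
 * page index idx in an inode, allocating nothing on a miss:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, idx, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */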
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
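
/*
 * On success truncate_nodes() returns the number of node blocks freed
 * under (and including) the given node; a fully-freed subtree hanging
 * off a direct-node pointer reports NIDS_PER_BLOCK + 1 (its children
 * plus itself), which is also what a hole (nid == 0) reports, so the
 * caller can clear the parent slot and advance child_nofs uniformly.
 */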
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
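
/*
 * To make the flow above concrete: truncating a 4KB-block file at, say,
 * block 2000 yields level == 1 (the offset lands in NODE_DIR2_BLOCK),
 * so no partial indirect path needs trimming and the while loop simply
 * frees the remaining direct/indirect trees from offset[0] onwards,
 * zeroing each fully-freed i_nid[] slot in the inode as it goes.  For
 * levels 2 and 3, truncate_partial_nodes() first trims the partially
 * covered indirect path, then the loop takes over from the next slot.
 */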
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should handle the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}
/*
 * Return a locked page for the desired node page, and readahead up to
 * MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
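
/*
 * Note how step is seeded above: a checkpoint-style flush (ino == 0)
 * walks all three steps, writing indirect nodes before the dnodes that
 * point at them, while an fsync of a single inode (ino != 0) starts
 * directly at step 2 and writes only that inode's dnodes, tagging them
 * with the fsync/dentry marks that roll-forward recovery looks for.
 */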
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	down_read(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* remember where the next scan for free nids should resume */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
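
/*
 * Each call above scans at most FREE_NID_PAGES NAT blocks starting at
 * next_scan_nid (wrapping at max_nid), then reconciles the result with
 * the NAT journal, whose entries are newer than the on-disk blocks.
 * Repeated calls therefore walk the whole NAT space round-robin until
 * more than NAT_ENTRY_PER_BLOCK free nids are cached.
 */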
/*
 * If this function returns success, caller can obtain a new nid
 * from second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
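
/*
 * The allocation protocol, as used by get_dnode_of_data() above
 * (illustrative sketch only): every successful alloc_nid() must be
 * paired with exactly one of the two calls below.
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	(puts the nid back)
 *	else
 *		alloc_nid_done(sbi, nid);	(retires it for good)
 */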
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}
void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
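
/*
 * The memcpy() above copies only the fixed inode fields that precede
 * i_ext (mode, uid/gid, timestamps, the name, and so on); i_size,
 * i_blocks, i_links and i_xattr_nid are then reset so that roll-forward
 * recovery can rebuild them from the logged dnodes.
 */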
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits them.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}

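/*
 * Before a checkpoint, dirty NAT entries are grouped into per-NAT-block
 * sets keyed by START_NID.  The set list is kept sorted by ascending
 * entry_cnt so that small sets, which still fit in the journal, are
 * flushed there first.
 */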
static struct nat_entry_set *grab_nat_entry_set(void)
{
	struct nat_entry_set *nes =
			f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

	nes->entry_cnt = 0;
	INIT_LIST_HEAD(&nes->set_list);
	INIT_LIST_HEAD(&nes->entry_list);
	return nes;
}

static void release_nat_entry_set(struct nat_entry_set *nes,
						struct f2fs_nm_info *nm_i)
{
	f2fs_bug_on(!list_empty(&nes->entry_list));

	nm_i->dirty_nat_cnt -= nes->entry_cnt;
	list_del(&nes->set_list);
	kmem_cache_free(nat_entry_set_slab, nes);
}

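/*
 * After a set has grown, move it towards the tail until the set list is
 * again ordered by ascending entry_cnt.
 */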
static void adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head)
{
	struct nat_entry_set *next = nes;

	if (list_is_last(&nes->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (nes->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&nes->set_list, &next->set_list);
}

static void add_nat_entry(struct nat_entry *ne, struct list_head *head)
{
	struct nat_entry_set *nes;
	nid_t start_nid = START_NID(ne->ni.nid);

	list_for_each_entry(nes, head, set_list) {
		if (nes->start_nid == start_nid) {
			list_move_tail(&ne->list, &nes->entry_list);
			nes->entry_cnt++;
			adjust_nat_entry_set(nes, head);
			return;
		}
	}

	nes = grab_nat_entry_set();

	nes->start_nid = start_nid;
	list_move_tail(&ne->list, &nes->entry_list);
	nes->entry_cnt++;
	list_add(&nes->set_list, head);
}

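/*
 * Move every dirty NAT entry into its per-block set under
 * nat_tree_lock.  Entries still at NEW_ADDR are skipped and remain on
 * the dirty list, presumably until they are assigned a real block
 * address.
 */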
static void merge_nats_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct list_head *dirty_list = &nm_i->dirty_nat_entries;
	struct list_head *set_list = &nm_i->nat_entry_set;
	struct nat_entry *ne, *tmp;

	write_lock(&nm_i->nat_tree_lock);
	list_for_each_entry_safe(ne, tmp, dirty_list, list) {
		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		add_nat_entry(ne, set_list);
		nm_i->dirty_nat_cnt++;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

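/* true if the NAT journal in @sum still has room for @size more entries */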
static bool __has_cursum_space(struct f2fs_summary_block *sum, int size)
{
	return nats_in_cursum(sum) + size <= NAT_JOURNAL_ENTRIES;
}

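/*
 * Drain the NAT journal of the current hot-data summary block: every
 * journalled entry is pulled back into the in-memory NAT cache and
 * marked dirty, so it will be flushed to a NAT page instead.
 */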
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne)
			goto found;

		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&ne->ni, &raw_ne);
found:
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * This function is called during the checkpointing process: it flushes
 * every dirty NAT entry either into the NAT journal of the current
 * hot-data summary block or, when the journal cannot hold them all,
 * back into the NAT pages themselves.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *nes, *tmp;
	struct list_head *head = &nm_i->nat_entry_set;
	bool to_journal = true;

	/* merge nat entries of dirty list to nat entry set temporarily */
	merge_nats_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store all the
	 * dirty nat entries, remove every entry from the journal and
	 * merge them into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt)) {
		remove_nats_in_journal(sbi);

		/* merge again, to pick up the entries moved back from
		 * the journal */
		merge_nats_in_set(sbi);
	}

	if (!nm_i->dirty_nat_cnt)
		return;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	list_for_each_entry_safe(nes, tmp, head, set_list) {
		struct f2fs_nat_block *nat_blk;
		struct nat_entry *ne, *cur;
		struct page *page;
		nid_t start_nid = nes->start_nid;

		if (to_journal && !__has_cursum_space(sum, nes->entry_cnt))
			to_journal = false;

		if (to_journal) {
			mutex_lock(&curseg->curseg_mutex);
		} else {
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
			f2fs_bug_on(!nat_blk);
		}

		/* flush dirty nats in nat entry set */
		list_for_each_entry_safe(ne, cur, &nes->entry_list, list) {
			struct f2fs_nat_entry *raw_ne;
			nid_t nid = nat_get_nid(ne);
			int offset;

			if (to_journal) {
				offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
				f2fs_bug_on(offset < 0);
				raw_ne = &nat_in_journal(sum, offset);
				nid_in_journal(sum, offset) = cpu_to_le32(nid);
			} else {
				raw_ne = &nat_blk->entries[nid - start_nid];
			}
			raw_nat_from_node_info(raw_ne, &ne->ni);

			if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(sbi, nid, false) <= 0) {
				write_lock(&nm_i->nat_tree_lock);
				__del_from_nat_cache(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			} else {
				write_lock(&nm_i->nat_tree_lock);
				__clear_nat_cache_dirty(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			}
		}

		if (to_journal)
			mutex_unlock(&curseg->curseg_mutex);
		else
			f2fs_put_page(page, 1);

		release_nat_entry_set(nes, nm_i);
	}

	f2fs_bug_on(!list_empty(head));
	f2fs_bug_on(nm_i->dirty_nat_cnt);
}

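/*
 * For context, a sketch of the expected checkpoint-time call order (the
 * caller lives in checkpoint.c; shown only for illustration and not
 * verified against this tree):
 *
 *	write_checkpoint(sbi, is_umount)
 *		flush_nat_entries(sbi);
 *		flush_sit_entries(sbi);
 *		do_checkpoint(sbi, is_umount);
 */
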
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
	INIT_LIST_HEAD(&nm_i->nat_entry_set);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

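/*
 * Tear down the node manager: release the free nid list first, then
 * drop every cached NAT entry via gang lookups on the radix tree, and
 * finally free the NAT bitmap and the nm_info structure itself.
 */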
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

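/*
 * Create the three slab caches used above (nat_entry, free_nid,
 * nat_entry_set); on failure, the caches already created are unwound
 * in reverse order.
 */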
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}