/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        unsigned int wait_ms;

        wait_ms = gc_th->min_sleep_time;

        set_freezable();
        do {
                wait_event_interruptible_timeout(*wq,
                                kthread_should_stop() || freezing(current) ||
                                gc_th->gc_wake,
                                msecs_to_jiffies(wait_ms));

                /* give it a try once */
                if (gc_th->gc_wake)
                        gc_th->gc_wake = 0;

                if (try_to_freeze())
                        continue;
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

#ifdef CONFIG_F2FS_FAULT_INJECTION
                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }
#endif

                if (!sb_start_write_trylock(sbi->sb))
                        continue;

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. The IO subsystem is idle, judged by the number of
                 *    writeback pages.
                 * 3. The IO subsystem is idle, judged by the number of
                 *    requests in the bdev's request list.
                 *
                 * Note: we have to avoid triggering GC too frequently,
                 * because some segments may be invalidated soon afterwards
                 * by user updates or deletions, so we wait a while to let
                 * dirty segments accumulate.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        goto next;

                if (gc_th->gc_urgent) {
                        wait_ms = gc_th->urgent_sleep_time;
                        goto do_gc;
                }

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        goto next;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);
do_gc:
                stat_inc_bggc_count(sbi);

                /* if the return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
                        wait_ms = gc_th->no_gc_sleep_time;

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balance f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);
next:
                sb_end_write(sbi->sb);

        } while (!kthread_should_stop());
        return 0;
}
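/*
 * The increase_sleep_time()/decrease_sleep_time() helpers used above are
 * defined in gc.h.  The compiled-out sketch below only illustrates the
 * back-off scheme: the wait time walks between min_sleep_time and
 * max_sleep_time in min_sleep_time steps, and a thread parked at
 * no_gc_sleep_time restarts from the maximum.  It is an assumption-laden
 * sketch, not the exact in-tree code.
 */
#if 0
static void sketch_increase_sleep_time(struct f2fs_gc_kthread *gc_th,
                                        unsigned int *wait)
{
        /* a thread parked at the no-GC interval restarts from the maximum */
        if (*wait == gc_th->no_gc_sleep_time)
                *wait = gc_th->max_sleep_time;
        else if (*wait + gc_th->min_sleep_time > gc_th->max_sleep_time)
                *wait = gc_th->max_sleep_time;
        else
                *wait += gc_th->min_sleep_time;
}

static void sketch_decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
                                        unsigned int *wait)
{
        if (*wait == gc_th->no_gc_sleep_time)
                *wait = gc_th->max_sleep_time;
        else if (*wait < 2 * gc_th->min_sleep_time)
                *wait = gc_th->min_sleep_time;
        else
                *wait -= gc_th->min_sleep_time;
}
#endif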
int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;
        gc_th->gc_urgent = 0;
        gc_th->gc_wake = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}
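/*
 * Note: gc_idle is a tuning knob (exposed through sysfs on kernels of this
 * era): 0 keeps the default policy above (cost-benefit for BG_GC, greedy
 * for FG_GC), 1 forces cost-benefit, and 2 forces greedy selection.
 */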
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        /* we need to check every dirty segment in the FG_GC case */
        if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        /* let's start from the beginning of the hot/small space */
        if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
                p->offset = 0;
        else
                p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return 2 * sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can reuse victim sections that were
         * selected by background GC before.
         * Those sections are guaranteed to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                if (no_fggc_candidate(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return GET_SEG_FROM_SEC(sbi, secno);
        }
        return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, true);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* handle the case where the user has changed the system time */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
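/*
 * Worked example for the cost-benefit formula above (hypothetical
 * numbers): with utilization u = 20 (the section is 20% valid) and
 * age = 80, the benefit term is 100 * (100 - 20) * 80 / (100 + 20) = 5333,
 * giving a cost of UINT_MAX - 5333.  A hotter, fuller section with u = 80
 * and age = 10 yields only 100 * 20 * 10 / 180 = 111, i.e. a cost much
 * closer to UINT_MAX, so the colder, emptier section wins the min-cost
 * comparison in get_victim_by_default().
 */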
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, true);
        else
                return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it grabs a victim segment without
 * removing it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment = MAIN_SEGS(sbi);
        unsigned int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = get_max_cost(sbi, &p);

        if (*result != NULL_SEGNO) {
                if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
                        get_valid_blocks(sbi, *result, false) &&
                        !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
                        p.min_segno = *result;
                goto out;
        }

        if (p.max_search == 0)
                goto out;

        last_victim = sm->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
                if (segno >= last_segment) {
                        if (sm->last_victim[p.gc_mode]) {
                                last_segment =
                                        sm->last_victim[p.gc_mode];
                                sm->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
                        nsearched += count_bits(p.dirty_segmap,
                                                p.offset - p.ofs_unit,
                                                p.ofs_unit);
                } else {
                        nsearched++;
                }

                secno = GET_SEC_FROM_SEG(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;
                if (gc_type == FG_GC && p.alloc_mode == LFS &&
                                        no_fggc_candidate(sbi, secno))
                        goto next;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
                                sm->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sm->last_victim[p.gc_mode] = segno + 1;
                        sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
out:
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        down_read(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        up_read(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary block
 * with the one in the NAT. If they match, the node is valid and is moved
 * with cold status; otherwise the stale entry is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* the block may become invalid during get_node_page() */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                get_node_info(sbi, nid, &ni);
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                move_node_page(node_page, gc_type);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: the caller must pass only node offsets of direct node
 * blocks. Passing an offset that points to any other node block type,
 * such as an indirect or double indirect node block, is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
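/*
 * Worked example for the mapping above, assuming the common 4KB-block
 * geometry where NIDS_PER_BLOCK == ADDRS_PER_BLOCK == 1018: node_ofs == 1,
 * the first direct node, maps to bidx == 0, i.e. the file range right
 * after the in-inode block pointers.  node_ofs == 3 is the first indirect
 * node itself and must never be passed in.  node_ofs == 4, the first
 * direct node hanging off that indirect node, gives dec == 0 and
 * bidx == 2, so its data starts at
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */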
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_msg(sbi->sb, KERN_WARNING,
                                "%s: valid data with mismatched node version.",
                                __func__);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, i.e. LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
                                        unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        block_t newaddr;
        int err;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        if (f2fs_is_atomic_file(inode))
                goto out;

        if (f2fs_is_pinned_file(inode)) {
                f2fs_pin_file_control(inode, true);
                goto out;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                goto put_out;
        }

        /*
         * don't cache encrypted data in the meta inode until the previous
         * dirty data has been written back, to avoid a race between GC
         * and flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true);

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA, NULL, false);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto recover_block;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
                err = -EIO;
                goto put_page_out;
        }
        if (unlikely(!PageUptodate(fio.encrypted_page))) {
                err = -EIO;
                goto put_page_out;
        }

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        err = f2fs_submit_page_write(&fio);
        if (err) {
                if (PageWriteback(fio.encrypted_page))
                        end_page_writeback(fio.encrypted_page);
                goto put_page_out;
        }

        f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (err)
                __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                        unsigned int segno, int off)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        if (f2fs_is_atomic_file(inode))
                goto out;
        if (f2fs_is_pinned_file(inode)) {
                if (gc_type == FG_GC)
                        f2fs_pin_file_control(inode, true);
                goto out;
        }

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .ino = inode->i_ino,
                        .type = DATA,
                        .temp = COLD,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .old_blkaddr = NULL_ADDR,
                        .page = page,
                        .encrypted_page = NULL,
                        .need_lock = LOCK_REQ,
                        .io_type = FS_GC_DATA_IO,
                };
                bool is_dirty = PageDirty(page);
                int err;

retry:
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        remove_dirty_inode(inode);
                }

                set_cold_data(page);

                err = do_write_data_page(&fio);
                if (err == -ENOMEM && is_dirty) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function fetches the parent node of each victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the recorded data block address
 * differs, the victim data block is ignored.
 * The work is split into five phases: phase 0 reads ahead the NAT
 * entries, phase 1 reads ahead the node pages, phase 2 reads ahead the
 * owning inodes' node pages, phase 3 opens the inodes and reads ahead
 * the data pages, and phase 4 actually moves the blocks.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
                nid_t nid = le32_to_cpu(entry->nid);

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* get the inode by ino, checking validity */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 2) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 3) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* for an encrypted inode, defer the move to phase 4 */
                        if (f2fs_encrypted_file(inode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        if (!down_write_trylock(
                                &F2FS_I(inode)->dio_rwsem[WRITE])) {
                                iput(inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, inode);
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, REQ_RAHEAD,
                                        true);
                        up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 4 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        struct f2fs_inode_info *fi = F2FS_I(inode);
                        bool locked = false;

                        if (S_ISREG(inode->i_mode)) {
                                if (!down_write_trylock(&fi->dio_rwsem[READ]))
                                        continue;
                                if (!down_write_trylock(
                                                &fi->dio_rwsem[WRITE])) {
                                        up_write(&fi->dio_rwsem[READ]);
                                        continue;
                                }
                                locked = true;

                                /* wait for all in-flight AIO data */
                                inode_dio_wait(inode);
                        }

                        start_bidx = start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_encrypted_file(inode))
                                move_data_block(inode, start_bidx, segno, off);
                        else
                                move_data_page(inode, start_bidx, gc_type,
                                                                segno, off);

                        if (locked) {
                                up_write(&fi->dio_rwsem[WRITE]);
                                up_write(&fi->dio_rwsem[READ]);
                        }

                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 5)
                goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        down_write(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                                NO_CHECK_TYPE, LFS);
        up_write(&sit_i->sentry_lock);
        return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
                                unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        unsigned int segno = start_segno;
        unsigned int end_segno = start_segno + sbi->segs_per_sec;
        int seg_freed = 0;
        unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                                                SUM_TYPE_DATA : SUM_TYPE_NODE;

        /* read ahead multiple SSA blocks that have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
                                        sbi->segs_per_sec, META_SSA, true);

        /* grab a reference on all summary pages */
        while (segno < end_segno) {
                sum_page = get_sum_page(sbi, segno++);
                unlock_page(sum_page);
        }

        blk_start_plug(&plug);

        for (segno = start_segno; segno < end_segno; segno++) {

                /* find the segment summary of the victim */
                sum_page = find_get_page(META_MAPPING(sbi),
                                        GET_SUM_BLOCK(sbi, segno));
                f2fs_put_page(sum_page, 0);

                if (get_valid_blocks(sbi, segno, false) == 0 ||
                                !PageUptodate(sum_page) ||
                                unlikely(f2fs_cp_error(sbi)))
                        goto next;

                sum = page_address(sum_page);
                f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

                /*
                 * this is to avoid deadlock:
                 * - lock_page(sum_page)         - f2fs_replace_block
                 *  - check_valid_map()            - down_write(sentry_lock)
                 *   - down_read(sentry_lock)     - change_curseg()
                 *                                  - lock_page(sum_page)
                 */
                if (type == SUM_TYPE_NODE)
                        gc_node_segment(sbi, sum->entries, segno, gc_type);
                else
                        gc_data_segment(sbi, sum->entries, gc_list, segno,
                                                                gc_type);

                stat_inc_seg_count(sbi, type, gc_type);

                if (gc_type == FG_GC &&
                                get_valid_blocks(sbi, segno, false) == 0)
                        seg_freed++;
next:
                f2fs_put_page(sum_page, 0);
        }

        if (gc_type == FG_GC)
                f2fs_submit_merged_write(sbi,
                                (type == SUM_TYPE_NODE) ? NODE : DATA);

        blk_finish_plug(&plug);

        stat_inc_call_count(sbi->stat_info);

        return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
                        bool background, unsigned int segno)
{
        int gc_type = sync ? FG_GC : BG_GC;
        int sec_freed = 0, seg_freed = 0, total_freed = 0;
        int ret = 0;
        struct cp_control cpc;
        unsigned int init_segno = segno;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        trace_f2fs_gc_begin(sbi->sb, sync, background,
                                get_pages(sbi, F2FS_DIRTY_NODES),
                                get_pages(sbi, F2FS_DIRTY_DENTS),
                                get_pages(sbi, F2FS_DIRTY_IMETA),
                                free_sections(sbi),
                                free_segments(sbi),
                                reserved_segments(sbi),
                                prefree_segments(sbi));

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
                goto stop;
        }
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto stop;
        }

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
                /*
                 * If there are many prefree segments below the given
                 * threshold, a checkpoint can free them, which secures
                 * free segments without needing FG_GC at all.
                 */
                if (prefree_segments(sbi)) {
                        ret = write_checkpoint(sbi, &cpc);
                        if (ret)
                                goto stop;
                }
                if (has_not_enough_free_secs(sbi, 0, 0))
                        gc_type = FG_GC;
        }

        /* f2fs_balance_fs doesn't need to do BG_GC in the critical path */
        if (gc_type == BG_GC && !background) {
                ret = -EINVAL;
                goto stop;
        }
        if (!__get_victim(sbi, &segno, gc_type)) {
                ret = -ENODATA;
                goto stop;
        }

        seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
        if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
                sec_freed++;
        total_freed += seg_freed;

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (!sync) {
                if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
                        segno = NULL_SEGNO;
                        goto gc_more;
                }

                if (gc_type == FG_GC)
                        ret = write_checkpoint(sbi, &cpc);
        }
stop:
        SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
        SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

        trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
                                get_pages(sbi, F2FS_DIRTY_NODES),
                                get_pages(sbi, F2FS_DIRTY_DENTS),
                                get_pages(sbi, F2FS_DIRTY_IMETA),
                                free_sections(sbi),
                                free_segments(sbi),
                                reserved_segments(sbi),
                                prefree_segments(sbi));

        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);

        if (sync)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
}
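/*
 * Locking contract, as implemented above: callers enter f2fs_gc() with
 * sbi->gc_mutex held (the background thread takes it with mutex_trylock()
 * in gc_thread_func()), and f2fs_gc() releases it on every return path.
 */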
void build_gc_manager(struct f2fs_sb_info *sbi)
{
        u64 main_count, resv_count, ovp_count;

        DIRTY_I(sbi)->v_ops = &default_v_ops;

        /* threshold of # of valid blocks in a section for victims of FG_GC */
        main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
        resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

        sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
                                BLKS_PER_SEC(sbi), (main_count - resv_count));

        sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

        /* assign the warm/cold data area to the slower device */
        if (sbi->s_ndevs && sbi->segs_per_sec == 1)
                SIT_I(sbi)->last_victim[ALLOC_NEXT] =
                                GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}