/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
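
/*
 * Worked example of the reversed-bit helpers above (a sketch assuming
 * 64-bit longs; the 32-bit case is analogous):
 *
 *   f2fs_set_bit(3, bitmap) sets byte 0 to 0001 0000 (0x10), since bit
 *   index 0 maps to the MSB of each byte.  __reverse_ulong() loads byte 0
 *   into the most significant byte of the word, giving
 *   0x1000000000000000, and __reverse_ffs() then narrows down from the
 *   top half:
 *
 *     0x1000000000000000 -> 0x10000000 -> 0x1000 -> 0x10 -> 0x1
 *
 *   accumulating num = 2 + 1 = 3, i.e. the logical bit index that was
 *   originally passed to f2fs_set_bit().
 */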
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of BITS_PER_LONG, i.e. the bitmap must span
 * whole unsigned longs.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
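
/*
 * The functions below implement f2fs atomic writes: pages written under
 * an atomic-write context are first parked on a per-inode in-memory list
 * (register_inmem_page), then either written back in one batch
 * (commit_inmem_pages) or thrown away (drop_inmem_pages).  If a batched
 * commit fails midway, __revoke_inmem_pages() restores the old block
 * addresses that were recorded at commit time.
 */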
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
static int __revoke_inmem_pages(struct inode *inode,
			struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}
void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_write(inode);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};
	bool submit_bio = false;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}

			fio.page = page;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;

			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all the committed pages.  This can still fail
		 * due to lack of memory or for some other reason; in that case
		 * -EAGAIN is returned, meaning the transaction is no longer
		 * intact, and the caller should use a journal to recover, or
		 * rewrite and commit the last transaction.  For any other
		 * error code, the revoke was completed by the filesystem
		 * itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false);
#endif

	if (!need)
		return;

	/* the background work of f2fs_balance_fs_bg is allowed to stay pending */
	if (excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * If there are too many dirty dir/node pages and not enough free
	 * segments, we should do GC, which may end up with a checkpoint.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false);
	}
}
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false);

	if (!is_idle(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
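
/*
 * The flush-merge machinery below batches cache-flush requests: callers
 * queue a flush_cmd on fcc->issue_list, and a dedicated kernel thread
 * (issue_flush_thread) drains the list, issues one PREFLUSH bio per
 * device, and completes every queued waiter with the shared result.
 * This collapses many concurrent fsync-driven flushes into a single
 * device flush.
 */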
static int __submit_flush_wait(struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi->sb->s_bdev);
	int i;

	if (sbi->s_ndevs && !ret) {
		for (i = 1; i < sbi->s_ndevs; i++) {
			ret = __submit_flush_wait(FDEV(i).bdev);
			if (ret)
				break;
		}
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
		int ret;

		atomic_inc(&fcc->submit_flush);
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->submit_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->submit_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->submit_flush);
	} else {
		llist_del_all(&fcc->issue_list);
		atomic_set(&fcc->submit_flush, 0);
	}

	return cmd.ret;
}
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->submit_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
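
/*
 * Dirty segment tracking: dirty_i->dirty_segmap[] keeps one bitmap per
 * dirty_type.  A segment with no valid blocks left is classed PRE
 * (prefree, reclaimable after the next checkpoint); a partially valid
 * one is classed DIRTY and also filed under its seg_entry type, so the
 * allocator and GC can pick victims per temperature.
 */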
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}
/*
 * Errors such as -ENOMEM must not occur here: adding a dirty entry to
 * the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't
 * be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
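
/*
 * Discard handling: small candidate ranges are collected as
 * discard_entry objects at checkpoint time, while asynchronously issued
 * discard bios are tracked as discard_cmd objects so that
 * f2fs_wait_discard_bio() can wait for any in-flight discard that
 * overlaps a block about to be reused.
 */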
static struct discard_cmd *__add_discard_cmd(struct f2fs_sb_info *sbi,
			struct bio *bio, block_t lstart, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *cmd_list = &(dcc->discard_cmd_list);
	struct discard_cmd *dc;

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bio = bio;
	dc->lstart = lstart;
	dc->len = len;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, cmd_list);

	return dc;
}

/* Callers must hold the global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = &(dcc->discard_cmd_list);
	struct discard_cmd *dc, *tmp;

	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		struct bio *bio = dc->bio;
		int err;

		if (!completion_done(&dc->wait)) {
			if ((dc->lstart <= blkaddr &&
					blkaddr < dc->lstart + dc->len) ||
					blkaddr == NULL_ADDR)
				wait_for_completion_io(&dc->wait);
			else
				continue;
		}

		err = bio->bi_error;
		if (err == -EOPNOTSUPP)
			err = 0;

		if (err)
			f2fs_msg(sbi->sb, KERN_INFO,
				"Issue discard failed, ret: %d", err);

		bio_put(bio);
		list_del(&dc->list);
		kmem_cache_free(discard_cmd_slab, dc);
	}
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	complete(&dc->wait);
}

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	struct bio *bio = NULL;
	block_t lblkstart = blkstart;
	int err;

	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	err = __blkdev_issue_discard(bdev,
				SECTOR_FROM_BLOCK(blkstart),
				SECTOR_FROM_BLOCK(blklen),
				GFP_NOFS, 0, &bio);
	if (!err && bio) {
		struct discard_cmd *dc = __add_discard_cmd(sbi, bio,
						lblkstart, blklen);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= REQ_SYNC;
		submit_bio(bio);
	}
	return err;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
	sector_t sector;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}
	sector = SECTOR_FROM_BLOCK(blkstart);

	if (sector & (bdev_zone_sectors(bdev) - 1) ||
	    nr_sects != bdev_zone_sectors(bdev)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"(%d) %s: Unaligned discard attempted (block %x + %x)",
			devi, sbi->s_ndevs ? FDEV(devi).path : "",
			blkstart, blklen);
		return -EIO;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device? */
		return -EIO;
	}
}
#endif
static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
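
/*
 * Note that f2fs_issue_discard() walks the range block by block, so a
 * request spanning a multi-device boundary is split per device, and
 * each block's discard_map bit is set exactly once (discard_blks is
 * decremented only on the 0 -> 1 transition).
 */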
static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, struct seg_entry *se,
		unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->dcc_info->nr_discards += end - start;
}

static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		__add_discard_entry(sbi, cpc, se, start, end);
	}
	return false;
}
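
/*
 * The dmap computed in add_discard_addrs() selects discard candidates:
 *
 *   normal checkpoint:   (cur_map ^ ckpt_map) & ckpt_map
 *     -> blocks that were valid at the last checkpoint but have since
 *        been invalidated, i.e. safe to discard after this checkpoint;
 *
 *   FITRIM (CP_DISCARD): ~ckpt_map & ~discard_map
 *     -> blocks free in the checkpointed image that have not already
 *        been discarded.
 */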
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct blk_plug plug;
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason == CP_DISCARD);

	blk_start_plug(&plug);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;

		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SECNO(sbi, start);
		start_segno = secno * sbi->segs_per_sec;
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, sbi->segs_per_sec))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		if (force && entry->len < cpc->trim_minlen)
			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		cpc->trimmed += entry->len;
skip:
		list_del(&entry->list);
		SM_I(sbi)->dcc_info->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}

	blk_finish_plug(&plug);
}
int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc;
	int err = 0;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;
	INIT_LIST_HEAD(&dcc->discard_entry_list);
	INIT_LIST_HEAD(&dcc->discard_cmd_list);
	dcc->nr_discards = 0;
	dcc->max_discards = 0;

	SM_I(sbi)->dcc_info = dcc;
init_thread:
	return err;
}

void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (free) {
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
	}
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (!f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
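
/*
 * update_sit_entry() keeps three views of a segment consistent:
 * cur_valid_map (the live bitmap), ckpt_valid_map / ckpt_valid_blocks
 * (the state as of the last checkpoint) and, when discard is enabled,
 * discard_map.  Every change also dirties the SIT entry so that it is
 * flushed at the next checkpoint.
 */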
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;

	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
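
/*
 * The result of npages_for_summary_flush() is always 1, 2 or 3: only
 * the three current data segments (hot/warm/cold) contribute summary
 * entries here, so in the worst case their compacted summaries spill
 * across three pages.  The first page also reserves room for the two
 * journal areas, and every page reserves room for a footer.
 */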
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_SIZE);
	else
		memset(dst, 0, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
/*
 * Find a new segment from the free segment bitmap, in the right order.
 * This function must always succeed; otherwise it is a bug.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
				(hint + 1) * sbi->segs_per_sec, *newseg + 1);
		if (segno < (hint + 1) * sbi->segs_per_sec)
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}
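
/*
 * Allocation order in get_new_segment(), roughly:
 *   1. if the caller does not demand a new section, try the next free
 *      segment inside the current section;
 *   2. otherwise search the free-section map from the hint, scanning
 *      right (ALLOC_RIGHT) or left (ALLOC_LEFT);
 *   3. on the first pass, prefer a zone not already occupied by one of
 *      the current segments, falling back to any free section once
 *      init is cleared.
 */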
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset.  If a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it needs to recover the existing segment information
 * of valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must always succeed; otherwise it is a bug.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}
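
/*
 * Policy summary for allocate_segment_by_default(): LFS allocation
 * (new_curseg) is the default, and SSR reuse of a dirty segment
 * (change_curseg) is attempted only when free space is tight
 * (need_SSR) and a victim can be found.  Warm node segments always
 * take the LFS path here.
 */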
  1255. void allocate_new_segments(struct f2fs_sb_info *sbi)
  1256. {
  1257. struct curseg_info *curseg;
  1258. unsigned int old_segno;
  1259. int i;
  1260. for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
  1261. curseg = CURSEG_I(sbi, i);
  1262. old_segno = curseg->segno;
  1263. SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
  1264. locate_dirty_segment(sbi, old_segno);
  1265. }
  1266. }
  1267. static const struct segment_allocation default_salloc_ops = {
  1268. .allocate_segment = allocate_segment_by_default,
  1269. };
  1270. bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
  1271. {
  1272. __u64 trim_start = cpc->trim_start;
  1273. bool has_candidate = false;
  1274. mutex_lock(&SIT_I(sbi)->sentry_lock);
  1275. for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
  1276. if (add_discard_addrs(sbi, cpc, true)) {
  1277. has_candidate = true;
  1278. break;
  1279. }
  1280. }
  1281. mutex_unlock(&SIT_I(sbi)->sentry_lock);
  1282. cpc->trim_start = trim_start;
  1283. return has_candidate;
  1284. }

int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;
	int err = 0;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Found FS corruption, run fsck to fix.");
		goto out;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
		if (err)
			break;

		schedule();
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return err;
}
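
/*
 * Illustrative caller side (not part of this file): this function is
 * reached through the generic FITRIM ioctl, e.g. by fstrim(8). A minimal
 * userspace sketch:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,	// rounded up to at least one block above
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	ioctl(fd, FITRIM, &range);	// range.len returns trimmed bytes
 *
 * The byte-based range is converted to block and then segment granularity
 * here, and discards are issued in BATCHED_TRIM_SEGMENTS-sized checkpoint
 * rounds.
 */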

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
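
/*
 * Illustrative temperature mapping for the default six logs (restates
 * __get_segment_type_6() above):
 *
 *	DATA	directory block			-> CURSEG_HOT_DATA
 *	DATA	cold page or cold file		-> CURSEG_COLD_DATA
 *	DATA	everything else			-> CURSEG_WARM_DATA
 *	NODE	direct node, cold		-> CURSEG_WARM_NODE
 *	NODE	direct node, not cold		-> CURSEG_HOT_NODE
 *	NODE	indirect/other node		-> CURSEG_COLD_NODE
 */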

void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	f2fs_wait_discard_bio(sbi, *new_blkaddr);

	/*
	 * __add_sum_entry must be called while holding curseg_mutex,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}
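
/*
 * Lock-ordering note (as observed in allocate_data_block() above and in
 * __f2fs_replace_block() below): curseg->curseg_mutex is always taken
 * before sit_i->sentry_lock, so the summary stamp and the SIT accounting
 * for one allocation stay atomic with respect to concurrent allocators.
 */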

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);
	int err;

	if (fio->type == NODE || fio->type == DATA)
		mutex_lock(&fio->sbi->wio_mutex[fio->type]);
reallocate:
	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
			&fio->new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	err = f2fs_submit_page_mbio(fio);
	if (err == -EAGAIN) {
		fio->old_blkaddr = fio->new_blkaddr;
		goto reallocate;
	}

	if (fio->type == NODE || fio->type == DATA)
		mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

void rewrite_data_page(struct f2fs_io_info *fio)
{
	fio->new_blkaddr = fio->old_blkaddr;
	stat_inc_inplace_blocks(fio->sbi);
	f2fs_submit_page_mbio(fio);
}

void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg, bool recover_newaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	if (!recover_curseg || recover_newaddr)
		update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}
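
/*
 * A minimal illustrative trace of the recover_curseg case (hypothetical
 * values): suppose the curseg sits at segno 100 with next_blkoff 7, and a
 * block must be replayed into segno 250 at offset 3. The function saves
 * (100, 7), switches to 250 via change_curseg(), stamps the summary at
 * offset 3, then switches back to 100 and restores next_blkoff 7, leaving
 * normal allocation undisturbed.
 */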

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg,
				bool recover_newaddr)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
					recover_curseg, recover_newaddr);

	f2fs_update_data_blkaddr(dn, new_addr);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool ordered)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
		if (ordered)
			wait_on_page_writeback(page);
		else
			wait_for_stable_page(page);
	}
}

void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
							block_t blkaddr)
{
	struct page *cpage;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true);
		f2fs_put_page(cpage, 1);
	}
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;

			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}
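
/*
 * Illustrative on-disk layout consumed above (a sketch inferred from the
 * three steps): the compacted summary area starts at start_sum_block()
 * and packs, in order,
 *
 *	[ NAT journal | SIT journal | hot/warm/cold data summaries ... ]
 *
 * Summary entries spill into following meta pages as needed, and the
 * last SUM_FOOTER_SIZE bytes of every page are kept clear of entries.
 * write_compacted_summaries() below produces the mirror image of this.
 */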

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;

			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);

	/* update journal info */
	down_write(&curseg->journal_rwsem);
	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
	up_write(&curseg->journal_rwsem);

	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;

		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}

	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}
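
/*
 * Illustrative use (mirrors the caller in flush_sit_entries() below):
 *
 *	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 *	if (offset >= 0)
 *		segno_in_journal(journal, offset) = cpu_to_le32(segno);
 *
 * With alloc set, a miss appends a fresh slot when the journal has room;
 * a return of -1 means the value was absent and no slot could be added.
 */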

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}
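
/*
 * Background note (a sketch, not a spec): the SIT area keeps two copies
 * of every SIT block (build_sit_info() below halves the raw SIT segment
 * count accordingly). Updates above always land in the inactive copy, and
 * set_to_next_sit() flips the per-block bit so later reads through
 * current_sit_addr() see the new version, keeping SIT updates shadowed
 * across a checkpoint.
 */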

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * temporarily add and account the sit entries from the dirty
	 * bitmap in the sit entry set
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store all dirty
	 * sit entries, remove all entries from the journal and add and
	 * account them in the sit entry set instead.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (cpc->reason != CP_DISCARD) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason == CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);
		cpc->trim_start = trim_start;
	}
	mutex_unlock(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
							GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;

	do {
		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			if (f2fs_discard_en(sbi)) {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += sbi->blocks_per_seg -
							se->valid_blocks;
			}

			if (sbi->segs_per_sec > 1)
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);

		if (f2fs_discard_en(sbi)) {
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += old_valid_blocks -
						se->valid_blocks;
		}

		if (sbi->segs_per_sec > 1)
			get_sec_entry(sbi, start)->valid_blocks +=
				se->valid_blocks - old_valid_blocks;
	}
	up_read(&curseg->journal_rwsem);
}
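
/*
 * Ordering note: the journal pass above runs after the on-disk SIT scan
 * on purpose, so journaled entries (the most recent state) override what
 * was read from the SIT blocks; the discard and section accounting is
 * then corrected by the old_valid_blocks delta rather than recounted.
 */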

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);

		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!test_opt(sbi, LFS))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}
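
/*
 * Sketch of the mount-time call order established above (assumption:
 * invoked once per mount from the fill_super path): flush/discard
 * controls come first, then the SIT, free segmap and cursegs, and only
 * then do build_sit_entries()/init_free_segmap()/build_dirty_segmap()
 * re-derive the free and dirty maps from the restored SIT state.
 * destroy_segment_manager() below tears down the same pieces.
 */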

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
			kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
#endif
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi, true);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}
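
/*
 * Note: the labels above unwind in reverse creation order, so a failure
 * at any step frees only the caches that were actually created; the full
 * teardown counterpart is destroy_segment_manager_caches() below.
 */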

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}