/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
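/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 32): after
 * f2fs_set_bit(0, bitmap) the first byte is 0x80, so __reverse_ulong()
 * on bytes {0x80, 0x00, 0x00, 0x00} yields 0x80000000, and the MSB-first
 * scans below report bit 0, matching f2fs's on-disk bit order.
 */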
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of the number of bits in an unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
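/*
 * Note on the "found" arithmetic above: once a word with a candidate bit
 * is reached, "size" counts the bits still unscanned from that word's
 * start, so result - size is the absolute index of the word's MSB, and
 * adding __reverse_ffs(tmp) selects the first set bit in f2fs's
 * MSB-first order.
 */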
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
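/*
 * Pages registered above stay pinned (via get_page()) on fi->inmem_pages
 * until commit_inmem_pages() writes them out or drop_inmem_pages()
 * discards them; this is how an atomic-write transaction is staged
 * entirely in memory before being committed in one batch.
 */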
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful case */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}

			fio.page = page;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx,
							DATA, WRITE);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all the committed pages. This can still fail
		 * due to lack of memory or some other reason; if it does,
		 * EAGAIN is returned, meaning the transaction is no longer
		 * intact and the caller should use the journal to recover,
		 * or rewrite and commit the last transaction. For any other
		 * error number, the revoke was completed by the filesystem
		 * itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	if (!need)
		return;

	/* f2fs_balance_fs_bg() may already be pending */
	if (excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are too
	 * many dirty dir/node pages and not enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false);
	}
}
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink the extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi))
		return;

	/* a checkpoint is the only way to shrink partially cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
	int i;

	if (!sbi->s_ndevs || ret)
		return ret;

	for (i = 1; i < sbi->s_ndevs; i++) {
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}
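/*
 * On multi-device mounts, a flush is complete only once every member
 * device has acknowledged its own PREFLUSH, hence the loop above over
 * FDEV(1..s_ndevs-1) after flushing the primary block device.
 */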
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (!atomic_read(&fcc->issing_flush)) {
		atomic_inc(&fcc->issing_flush);
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->issing_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		llist_del_all(&fcc->issue_list);
		atomic_set(&fcc->issing_flush, 0);
	}

	return cmd.ret;
}
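/*
 * With FLUSH_MERGE, concurrent callers queue their flush_cmds on a
 * lock-free llist and the single issue_flush_thread services the whole
 * batch with one PREFLUSH, so N threads calling fsync() at once cost
 * one device flush instead of N.
 */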
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * This must not fail with an error such as -ENOMEM; adding a dirty
 * entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't
 * be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
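/*
 * Classification above: a segment with no valid blocks goes on the PRE
 * list (freeable after the next checkpoint), a partially valid one goes
 * on the DIRTY list (a GC/SSR candidate), and a fully valid one belongs
 * on neither.
 */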
static void __add_discard_cmd(struct f2fs_sb_info *sbi,
			struct block_device *bdev, block_t lstart,
			block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list = &(dcc->discard_pend_list);
	struct discard_cmd *dc;

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);

	mutex_lock(&dcc->cmd_lock);
	list_add_tail(&dc->list, pend_list);
	mutex_unlock(&dcc->cmd_lock);

	atomic_inc(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&(SM_I(sbi)->dcc_info->issing_discard));

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
				"Issue discard failed, ret: %d", dc->error);
	list_del(&dc->list);
	kmem_cache_free(discard_cmd_slab, dc);
	atomic_dec(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = bio->bi_error;
	dc->state = D_DONE;
	complete(&dc->wait);
	bio_put(bio);
}
/* adapted from blkdev_issue_discard() in block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct bio *bio = NULL;

	if (dc->state != D_PREP)
		return;

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* must be set before submission, to avoid racing with D_DONE */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= REQ_SYNC;
			submit_bio(bio);
			list_move_tail(&dc->list, &dcc->discard_wait_list);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	trace_f2fs_issue_discard(bdev, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	__add_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
	wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
	return 0;
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
			struct discard_cmd *dc, block_t blkaddr)
{
	block_t end_block = START_BLOCK(sbi, GET_SEGNO(sbi, blkaddr) + 1);

	if (dc->state == D_DONE || dc->lstart + dc->len <= end_block) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	if (blkaddr - dc->lstart < dc->lstart + dc->len - end_block) {
		dc->start += (end_block - dc->lstart);
		dc->len -= (end_block - dc->lstart);
		dc->lstart = end_block;
	} else {
		dc->len = blkaddr - dc->lstart;
	}
}
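/*
 * Punch example (illustrative numbers): a D_PREP command covering
 * blocks [100, 300) punched at blkaddr 120, where the segment holding
 * block 120 ends at block 128, keeps the larger tail [128, 300) and
 * drops the head; the smaller remainder is simply dropped rather than
 * split into a second command.
 */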
/* This should be covered by the global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list = &(dcc->discard_pend_list);
	struct list_head *wait_list = &(dcc->discard_wait_list);
	struct discard_cmd *dc, *tmp;

	mutex_lock(&dcc->cmd_lock);

	list_for_each_entry_safe(dc, tmp, pend_list, list) {
		if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len)
			__punch_discard_cmd(sbi, dc, blkaddr);
	}

	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
			wait_for_completion_io(&dc->wait);
			__punch_discard_cmd(sbi, dc, blkaddr);
		}
	}

	mutex_unlock(&dcc->cmd_lock);
}

/* Called from f2fs_put_super() */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list = &(dcc->discard_pend_list);
	struct list_head *wait_list = &(dcc->discard_wait_list);
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;

	mutex_lock(&dcc->cmd_lock);

	blk_start_plug(&plug);
	list_for_each_entry_safe(dc, tmp, pend_list, list)
		__submit_discard_cmd(sbi, dc);
	blk_finish_plug(&plug);

	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		wait_for_completion_io(&dc->wait);
		__remove_discard_cmd(sbi, dc);
	}

	mutex_unlock(&dcc->cmd_lock);
}
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct list_head *pend_list = &dcc->discard_pend_list;
	struct list_head *wait_list = &dcc->discard_wait_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int iter = 0;
repeat:
	if (kthread_should_stop())
		return 0;

	mutex_lock(&dcc->cmd_lock);
	blk_start_plug(&plug);
	list_for_each_entry_safe(dc, tmp, pend_list, list) {
		f2fs_bug_on(sbi, dc->state != D_PREP);

		if (is_idle(sbi))
			__submit_discard_cmd(sbi, dc);

		if (iter++ > DISCARD_ISSUE_RATE)
			break;
	}
	blk_finish_plug(&plug);

	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->state == D_DONE) {
			wait_for_completion_io(&dc->wait);
			__remove_discard_cmd(sbi, dc);
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	iter = 0;
	congestion_wait(BLK_RW_SYNC, HZ/50);

	wait_event_interruptible(*q, kthread_should_stop() ||
		!list_empty(pend_list) || !list_empty(wait_list));
	goto repeat;
}
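/*
 * Two throttles above keep discards off the hot path: is_idle() defers
 * issuing to idle periods, and DISCARD_ISSUE_RATE caps how many pending
 * commands a single pass may submit before the thread backs off via
 * congestion_wait() and sleeps until new work arrives.
 */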
#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path : "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device? */
		return -EIO;
	}
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
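/*
 * The loop above walks the range block by block so a discard spanning a
 * device boundary is split into one request per bdev, and each block is
 * marked in its segment's discard_map so it is not queued twice.
 */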
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}
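/*
 * dmap above selects the discard candidates: in CP_DISCARD (force) mode,
 * blocks invalid in both the checkpoint map and the discard map; in
 * normal checkpoint mode, (cur ^ ckpt) & ckpt picks blocks that were
 * valid at the last checkpoint but have been freed since.
 */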
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason == CP_DISCARD);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;

		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (force && len < cpc->trim_minlen)
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			cpc->trimmed += len;
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		list_del(&entry->list);
		SM_I(sbi)->dcc_info->nr_discards -= total_len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcc->discard_entry_list);
	INIT_LIST_HEAD(&dcc->discard_pend_list);
	INIT_LIST_HEAD(&dcc->discard_wait_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->issing_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = 0;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (!dcc)
		return;

	if (dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}

	kfree(dcc);
	SM_I(sbi)->dcc_info = NULL;
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;

		/* don't overwrite by SSR to keep node chain */
		if (se->type == CURSEG_WARM_NODE) {
			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
				se->ckpt_valid_blocks++;
		}
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (!f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
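/*
 * In update_sit_entry(), del is +1 when a block is newly allocated at
 * blkaddr and -1 when it is invalidated; the per-segment valid count,
 * the cur/ckpt valid bitmaps, the discard map, and the section-level
 * count (for segs_per_sec > 1) are all kept in step by the one call.
 */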
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function must be called while holding the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;

	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
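/*
 * Sizing note on the arithmetic above: the first summary page loses room
 * to two journal areas plus the footer, a second page only to the
 * footer, and the three data cursegs' live summaries are assumed to fit
 * by the third page, which is why the function can only return 1, 2,
 * or 3.
 */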
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_SIZE);
	else
		memset(dst, 0, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}
/*
 * Find a new segment in the free segment bitmap, in the right order.
 * This function must succeed; failure to find a segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
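
/*
 * Choose the search hint for a new current segment: the hot data log
 * and all node logs restart the search from segment 0, while the
 * remaining data logs continue from their current segment.
 */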
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		return 0;
	return CURSEG_I(sbi, type)->segno;
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in the LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
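
/*
 * Find the next free block offset in an SSR segment, starting at @start.
 * A block may only be reused if it is free in both the current and the
 * last-checkpoint validity bitmaps, so scan the OR of the two maps.
 */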
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in the LFS manner, the next block offset is
 * simply the current block offset plus one. If it is written in the SSR
 * manner, the next block offset is obtained via __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in the SSR manner, so it has to recover the existing summary
 * information of the segment's valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}
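
/*
 * Choose a victim segment for SSR allocation. Try the requested log
 * type first; for intensive SSR, fall back to the other logs of the
 * same class (node or data), scanning from hot to cold or in reverse
 * depending on the requested temperature.
 */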
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	int i, cnt;
	bool reversed = false;

	/* need_SSR() already forces to do this */
	if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
		return 1;

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(type)) {
		if (type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, i, SSR))
			return 1;
	}
	return 0;
}

/*
 * Flush out the current segment and replace it with a new one.
 * This function must succeed; failing to allocate is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	if (force)
		new_curseg(sbi, type, true);
	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
					type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_segno;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_segno = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_segno);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
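
/*
 * Check, without issuing anything, whether the trim range contains at
 * least one segment that would produce discard candidates; the
 * check-only flag of add_discard_addrs() keeps this side-effect free.
 */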
bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	__u64 trim_start = cpc->trim_start;
	bool has_candidate = false;

	mutex_lock(&SIT_I(sbi)->sentry_lock);
	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
		if (add_discard_addrs(sbi, cpc, true)) {
			has_candidate = true;
			break;
		}
	}
	mutex_unlock(&SIT_I(sbi)->sentry_lock);

	cpc->trim_start = trim_start;
	return has_candidate;
}

int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;
	int err = 0;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Found FS corruption, run fsck to fix.");
		goto out;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
		if (err)
			break;

		schedule();
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return err;
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}
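
/*
 * Temperature selection for the active logs. With two logs everything
 * is hot; with four logs directory data stays hot and node blocks are
 * split by coldness; with six logs (the default) data is further split
 * into hot/warm/cold using per-inode and per-page hints.
 */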
static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		if (is_inode_flag_set(inode, FI_HOT_DATA))
			return CURSEG_HOT_DATA;
		return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	f2fs_wait_discard_bio(sbi, *new_blkaddr);

	/*
	 * __add_sum_entry() must be called under curseg_mutex because
	 * it updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated after segment allocation,
	 * since we need to keep dirty segments precisely under SSR.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}
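
/*
 * Allocate an on-disk block for @fio and submit the write through the
 * merged-bio path. If submission returns -EAGAIN, retry the allocation
 * with the just-allocated block as the new starting address.
 */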
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);
	int err;

	if (fio->type == NODE || fio->type == DATA)
		mutex_lock(&fio->sbi->wio_mutex[fio->type]);
reallocate:
	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
			&fio->new_blkaddr, sum, type);

	/* write out the dirty page to the bdev */
	err = f2fs_submit_page_mbio(fio);
	if (err == -EAGAIN) {
		fio->old_blkaddr = fio->new_blkaddr;
		goto reallocate;
	}

	if (fio->type == NODE || fio->type == DATA)
		mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

int rewrite_data_page(struct f2fs_io_info *fio)
{
	fio->new_blkaddr = fio->old_blkaddr;
	stat_inc_inplace_blocks(fio->sbi);
	return f2fs_submit_page_bio(fio);
}
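
/*
 * Rewire @old_blkaddr to @new_blkaddr in the SIT and summary state;
 * used by recovery and block migration. With @recover_curseg the
 * original current-segment position is restored afterwards; with
 * @recover_newaddr the new address is additionally marked valid.
 */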
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg, bool recover_newaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	if (!recover_curseg || recover_newaddr)
		update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg,
				bool recover_newaddr)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
					recover_curseg, recover_newaddr);

	f2fs_update_data_blkaddr(dn, new_addr);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool ordered)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
						0, page->index, type, WRITE);
		if (ordered)
			wait_on_page_writeback(page);
		else
			wait_for_stable_page(page);
	}
}

void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
							block_t blkaddr)
{
	struct page *cpage;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true);
		f2fs_put_page(cpage, 1);
	}
}
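
/*
 * The compacted summary format packs the NAT journal, the SIT journal
 * and the summary entries of the three data logs back to back into as
 * few meta pages as possible; restore them in that order.
 */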
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;

			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;

			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set the uncompleted segment as curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);

	/* update journal info */
	down_write(&curseg->journal_rwsem);
	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
	up_write(&curseg->journal_rwsem);

	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}
	return 0;
}
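
/*
 * Mirror of read_compacted_summaries(): pack the NAT journal, the SIT
 * journal and the data-log summary entries into consecutive meta pages
 * starting at @blkaddr.
 */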
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;

		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
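
/*
 * A minimal usage sketch (hypothetical caller) for the journal lookup
 * below: find or allocate a SIT journal slot for a segment, falling
 * back to the on-disk SIT block when the journal is full.
 * flush_sit_entries() follows the same pattern:
 *
 *	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 *	if (offset >= 0)
 *		segno_in_journal(journal, offset) = cpu_to_le32(segno);
 *	else
 *		... write the entry into the SIT page instead ...
 */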
int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}
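
/*
 * SIT blocks are kept in two copies; next_sit_addr() points at the
 * currently unused copy. Migrate the live block there, dirty it, and
 * flip the bitmap bit so future lookups use the new copy.
 */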
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}
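
/*
 * Evict every SIT entry cached in the cold data journal: mark each one
 * dirty again (adding it to a sit_entry_set if needed) so that it is
 * written through the SIT pages on this checkpoint.
 */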
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of the dirty bitmap in the sit
	 * entry sets temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in the sit entry sets.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (cpc->reason != CP_DISCARD) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason == CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);
		cpc->trim_start = trim_start;
	}
	mutex_unlock(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* set up SIT bitmap from the checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
							GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
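
/*
 * Load all SIT entries from disk (with readahead), then overlay the
 * newer entries still sitting in the SIT journal, rebuilding the
 * discard maps and per-section valid-block counts along the way.
 */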
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;

	do {
		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			if (f2fs_discard_en(sbi)) {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += sbi->blocks_per_seg -
							se->valid_blocks;
			}

			if (sbi->segs_per_sec > 1)
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);

		if (f2fs_discard_en(sbi)) {
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += old_valid_blocks -
						se->valid_blocks;
		}

		if (sbi->segs_per_sec > 1)
			get_sec_entry(sbi, start)->valid_blocks +=
				se->valid_blocks - old_valid_blocks;
	}
	up_read(&curseg->journal_rwsem);
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);

		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!test_opt(sbi, LFS))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
			kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
#endif
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}