/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
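
/*
 * Example for __reverse_ulong() above (64-bit build): for
 * str = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} it returns
 * 0x0102030405060708UL, i.e. byte 0 of the bitmap lands in the most
 * significant byte of the word, so bit scans see f2fs's on-disk layout.
 */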

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
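
/*
 * Worked example for __reverse_ffs() above: on a 64-bit build,
 * __reverse_ffs(0x8000000000000000UL) == 0 and __reverse_ffs(0x1UL) == 63.
 * Bits are counted from the MSB down, which matches the reversed per-byte
 * bit order produced by __reverse_ulong().
 */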

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
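
/*
 * As with find_next_bit(), __find_rev_next_bit() returns @size when no set
 * bit exists at or after @offset; __find_rev_next_zero_bit() below behaves
 * the same for zero bits.  Callers test the result against the bitmap size
 * to detect "not found".
 */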

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
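
/*
 * Tear down in-memory (atomic-write) pages on @head.  With @drop, the
 * uncommitted pages are simply thrown away; with @recover, pages that were
 * already written out are rolled back to their saved old block address via
 * f2fs_replace_block().  Returns -EAGAIN when a rollback target cannot be
 * looked up, in which case the transaction is no longer intact.
 */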
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}

			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}

int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all the committed pages.  Revoking can still
		 * fail, e.g. due to memory pressure; in that case -EAGAIN is
		 * returned, meaning the transaction's integrity is already
		 * broken and the caller should recover via its journal or
		 * rewrite and commit the last transaction.  For any other
		 * error, the filesystem has done the revoking by itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* background balancing work may still be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
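
/*
 * Issue an empty bio flagged REQ_PREFLUSH against @bdev and wait for it to
 * complete: with no payload attached, this acts as a pure cache-flush
 * request to the device.
 */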
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
	int i;

	if (!sbi->s_ndevs || ret)
		return ret;

	for (i = 1; i < sbi->s_ndevs; i++) {
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
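
/*
 * Flush-merge logic: with NOBARRIER the flush is skipped entirely; without
 * FLUSH_MERGE (or when no other flush is in flight) it is issued
 * synchronously.  Otherwise the command is queued on fcc->issue_list and a
 * single flush completed by issue_flush_thread() satisfies every waiter
 * queued behind it.
 */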
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (!atomic_read(&fcc->issing_flush)) {
		atomic_inc(&fcc->issing_flush);
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->issing_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		llist_del_all(&fcc->issue_list);
		atomic_set(&fcc->issing_flush, 0);
	}

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
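
/*
 * Dirty-segment accounting below uses two levels of bitmaps: a bit in
 * dirty_segmap[dirty_type] per PRE/DIRTY state, and, for DIRTY segments, a
 * second bit in dirty_segmap[t] keyed by the segment's data/node type, with
 * nr_dirty[] counters mirroring both.
 */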
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error (such as -ENOMEM) should occur here:
 * adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &dcc->root);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard failed, ret: %d", dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = bio->bi_error;
	dc->state = D_DONE;
	complete(&dc->wait);
	bio_put(bio);
}

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct bio *bio = NULL;

	if (dc->state != D_PREP)
		return;

	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* should keep before submission to avoid D_DONE right away */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= REQ_SYNC;
			submit_bio(bio);
			list_move_tail(&dc->list, &dcc->wait_list);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
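
/*
 * Merge the new range [lstart, lstart + len) into the pending-discard tree:
 * the range is first clipped against any overlapping commands, then glued
 * onto a back-mergeable predecessor and/or a front-mergeable successor
 * (both must still be D_PREP and on the same bdev); whatever cannot be
 * merged is inserted as a fresh command.
 */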
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	block_t end = lstart + len;

	mutex_lock(&dcc->cmd_lock);

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	mutex_unlock(&dcc->cmd_lock);
}

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	return 0;
}

static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, iter = 0;

	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi,
		!__check_rb_tree_consistence(sbi, &dcc->root));
	blk_start_plug(&plug);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (!issue_cond || is_idle(sbi))
				__submit_discard_cmd(sbi, dc);
			if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
				goto out;
		}
	}
out:
	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);
}

static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (!wait_cond || dc->state == D_DONE) {
			if (dc->ref)
				continue;
			wait_for_completion_io(&dc->wait);
			__remove_discard_cmd(sbi, dc);
		}
	}
	mutex_unlock(&dcc->cmd_lock);
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		wait_for_completion_io(&dc->wait);
		mutex_lock(&dcc->cmd_lock);
		f2fs_bug_on(sbi, dc->state != D_DONE);
		dc->ref--;
		if (!dc->ref)
			__remove_discard_cmd(sbi, dc);
		mutex_unlock(&dcc->cmd_lock);
	}
}

/* This comes from f2fs_put_super */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	__issue_discard_cmd(sbi, false);
	__wait_discard_cmd(sbi, false);
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;

	set_freezable();

	do {
		wait_event_interruptible(*q, kthread_should_stop() ||
					freezing(current) ||
					atomic_read(&dcc->discard_cmd_cnt));
		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			return 0;

		__issue_discard_cmd(sbi, true);
		__wait_discard_cmd(sbi, true);

		congestion_wait(BLK_RW_SYNC, HZ/50);
	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path: "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device ? */
		return -EIO;
	}
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
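
/*
 * add_discard_addrs() below derives the per-segment discard candidates:
 * under CP_DISCARD (force) every block that is neither checkpoint-valid nor
 * already discarded qualifies; otherwise only blocks that were valid at the
 * last checkpoint but have since been dropped, i.e.
 * (cur_map ^ ckpt_map) & ckpt_map.
 */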
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;

		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (force && len < cpc->trim_minlen)
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			cpc->trimmed += len;
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		list_del(&entry->list);
		SM_I(sbi)->dcc_info->nr_discards -= total_len;
		kmem_cache_free(discard_entry_slab, entry);
	}

	wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
}

static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0, i;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcc->entry_list);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		INIT_LIST_HEAD(&dcc->pend_list[i]);
	INIT_LIST_HEAD(&dcc->wait_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->issing_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
	dcc->undiscard_blks = 0;
	dcc->root = RB_ROOT;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (!dcc)
		return;

	if (dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}

	kfree(dcc);
	SM_I(sbi)->dcc_info = NULL;
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}
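
/*
 * update_sit_entry() below adjusts a segment's valid-block accounting by
 * @del blocks around @blkaddr: callers pass del = 1 when a block becomes
 * valid (a new write) and del = -1 when it is invalidated, as done by
 * refresh_sit_entry() and invalidate_blocks().
 */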
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;

		/* don't overwrite by SSR to keep node chain */
		if (se->type == CURSEG_WARM_NODE) {
			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
				se->ckpt_valid_blocks++;
		}
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
			if (!f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir))
				f2fs_bug_on(sbi, 1);
			else
				WARN_ON(1);
#else
			f2fs_bug_on(sbi, 1);
#endif
		}
		if (f2fs_discard_en(sbi) &&
			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;

	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}
/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
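/*
 * A rough worked example (comment added for clarity, assuming the usual
 * 4KB block with SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and
 * SUM_JOURNAL_SIZE = 507): the first compacted page holds
 * (4096 - 2 * 507 - 5) / 7 = 439 summary entries after the two journals,
 * and each following page holds (4096 - 5) / 7 = 584 entries, so three
 * pages are enough even when all three data logs are SSR-allocated
 * (3 * 512 = 1536 entries with default 2MB segments).
 */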
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_SIZE);
	else
		memset(dst, 0, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
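/*
 * Pick the hint segment from which get_new_segment() starts searching.
 * (Comment added for clarity; the policy is inferred from the code below:
 * hot data and all node logs restart from the front of the main area,
 * while the remaining data logs reuse the last ALLOC_NEXT victim hint
 * recorded by SSR/GC victim selection, falling back to the current
 * segment.)
 */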
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
	if (sbi->segs_per_sec != 1)
		return CURSEG_I(sbi, type)->segno;

	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		return 0;

	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
	return CURSEG_I(sbi, type)->segno;
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in the LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}
/*
 * If a segment is written in the LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in the SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * the SSR manner, so it must recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	unsigned segno = NULL_SEGNO;
	int i, cnt;
	bool reversed = false;

	/* need_SSR() already forces to do this */
	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
		curseg->next_segno = segno;
		return 1;
	}

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(type)) {
		if (type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}
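	/*
	 * Fall back to the other logs of the same kind, scanning from cold
	 * towards hot when @type is warm or cold and from hot towards cold
	 * otherwise, skipping @type itself. (Comment added for clarity;
	 * the scan direction follows the @reversed setup above.)
	 */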
	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;
		if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
			curseg->next_segno = segno;
			return 1;
		}
	}
	return 0;
}
/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
					type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_segno;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_segno = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_segno);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	__u64 trim_start = cpc->trim_start;
	bool has_candidate = false;

	mutex_lock(&SIT_I(sbi)->sentry_lock);
	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
		if (add_discard_addrs(sbi, cpc, true)) {
			has_candidate = true;
			break;
		}
	}
	mutex_unlock(&SIT_I(sbi)->sentry_lock);

	cpc->trim_start = trim_start;
	return has_candidate;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;
	int err = 0;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Found FS corruption, run fsck to fix.");
		goto out;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
		if (err)
			break;

		schedule();
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return err;
}
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}
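/*
 * The three helpers below map an I/O to one of the active logs, depending
 * on how many logs the filesystem runs with (active_logs = 2, 4 or 6).
 * Roughly (comment added for clarity, summarizing the code below):
 *   2 logs: all data -> HOT_DATA, all nodes -> HOT_NODE
 *   4 logs: dir data -> HOT_DATA, file data -> COLD_DATA,
 *           cold direct nodes -> WARM_NODE, other nodes -> COLD_NODE
 *   6 logs: full hot/warm/cold separation for both data and nodes
 */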
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
	if (fio->type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}
static int __get_segment_type_4(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (is_cold_data(fio->page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		if (is_inode_flag_set(inode, FI_HOT_DATA))
			return CURSEG_HOT_DATA;
		return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(fio->page))
			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		return CURSEG_COLD_NODE;
	}
}
static int __get_segment_type(struct f2fs_io_info *fio)
{
	int type = 0;

	switch (fio->sbi->active_logs) {
	case 2:
		type = __get_segment_type_2(fio);
		break;
	case 4:
		type = __get_segment_type_4(fio);
		break;
	case 6:
		type = __get_segment_type_6(fio);
		break;
	default:
		f2fs_bug_on(fio->sbi, true);
	}

	if (IS_HOT(type))
		fio->temp = HOT;
	else if (IS_WARM(type))
		fio->temp = WARM;
	else
		fio->temp = COLD;
	return type;
}
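/*
 * Reserve the next free block of the @type log for the caller: pick the
 * block address, record the summary entry, advance the log and, when the
 * current segment is exhausted, pull in a new one before the SIT is
 * refreshed. (Descriptive comment added for clarity; it restates the
 * steps of the function below.)
 */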
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	f2fs_wait_discard_bio(sbi, *new_blkaddr);

	/*
	 * __add_sum_entry must be called under the curseg_mutex because it
	 * updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated after segment allocation,
	 * since we need to keep dirty segments precisely under SSR.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio);
	int err;

	if (fio->type == NODE || fio->type == DATA)
		mutex_lock(&fio->sbi->wio_mutex[fio->type][fio->temp]);
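	/*
	 * Retry loop, comment added for clarity: f2fs_submit_page_write()
	 * returns -EAGAIN when the just-allocated block cannot be submitted
	 * as-is (presumably, in this era of the code, when it does not
	 * satisfy an alignment constraint of the I/O path), in which case
	 * a new block is allocated and the write is retried.
	 */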
reallocate:
	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
			&fio->new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	err = f2fs_submit_page_write(fio);
	if (err == -EAGAIN) {
		fio->old_blkaddr = fio->new_blkaddr;
		goto reallocate;
	}

	if (fio->type == NODE || fio->type == DATA)
		mutex_unlock(&fio->sbi->wio_mutex[fio->type][fio->temp]);
}
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	f2fs_submit_page_write(&fio);
}
void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

int rewrite_data_page(struct f2fs_io_info *fio)
{
	fio->new_blkaddr = fio->old_blkaddr;
	stat_inc_inplace_blocks(fio->sbi);
	return f2fs_submit_page_bio(fio);
}
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg, bool recover_newaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	if (!recover_curseg || recover_newaddr)
		update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg,
				bool recover_newaddr)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
					recover_curseg, recover_newaddr);

	f2fs_update_data_blkaddr(dn, new_addr);
}
void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool ordered)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		f2fs_submit_merged_write_cond(sbi, page->mapping->host,
						0, page->index, type);
		if (ordered)
			wait_on_page_writeback(page);
		else
			wait_for_stable_page(page);
	}
}

void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
							block_t blkaddr)
{
	struct page *cpage;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true);
		f2fs_put_page(cpage, 1);
	}
}
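/*
 * Layout of the compacted summary area, as read below and written by
 * write_compacted_summaries() (comment added for clarity):
 *   page 0:    [NAT journal][SIT journal][packed f2fs_summary entries...]
 *   page 1..:  [packed f2fs_summary entries...]
 * The entries of the three data logs are packed back to back, spilling
 * into the next page whenever the next entry would overrun the footer
 * area at the end of a page.
 */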
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);

	/* update journal info */
	down_write(&curseg->journal_rwsem);
	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
	up_write(&curseg->journal_rwsem);

	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}
	return 0;
}
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
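/*
 * Comment added for clarity: lookup_journal_in_cursum() returns the index
 * of an existing journal entry matching @val or, when @alloc is set and
 * space remains, the index of a freshly reserved slot; -1 means the value
 * was not found and no slot was (or could be) reserved.
 */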
int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}
static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}
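/*
 * Track one more dirty SIT entry in the set covering @segno's SIT block.
 * adjust_sit_entry_set() keeps the list sorted by entry_cnt in ascending
 * order, presumably so that at flush time the small sets at the head are
 * the ones tried against the remaining journal space first. (Descriptive
 * comment added for clarity.)
 */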
static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}
static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account the sit entries of the dirty bitmap in sit entry
	 * sets temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in sit entry sets.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (!(cpc->reason & CP_DISCARD)) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason & CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);

		cpc->trim_start = trim_start;
	}
	mutex_unlock(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
							GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;

	do {
		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			if (f2fs_discard_en(sbi)) {
				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
					memset(se->discard_map, 0xff,
						SIT_VBLOCK_MAP_SIZE);
				} else {
					memcpy(se->discard_map,
						se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
					sbi->discard_blks +=
						sbi->blocks_per_seg -
						se->valid_blocks;
				}
			}

			if (sbi->segs_per_sec > 1)
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);

		if (f2fs_discard_en(sbi)) {
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff,
							SIT_VBLOCK_MAP_SIZE);
			} else {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += old_valid_blocks -
							se->valid_blocks;
			}
		}

		if (sbi->segs_per_sec > 1)
			get_sec_entry(sbi, start)->valid_blocks +=
				se->valid_blocks - old_valid_blocks;
	}
	up_read(&curseg->journal_rwsem);
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);

		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}
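/*
 * Mount-time bring-up order (comment added for clarity, restating the
 * calls below): read tunables from the superblock/checkpoint, start the
 * flush and discard services, then build SIT info, the free segmap and
 * the current segments, replay SIT entries on top of the free segmap,
 * derive the dirty segmap, and finally seed the min/max mtimes for GC.
 */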
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!test_opt(sbi, LFS))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
			kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
#endif
	kfree(sit_i);
}
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}
int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}