/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /*
			      * Upper limit of the number of segments
			      * appended in collection retry loop
			      */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file.  This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of
 * the variable must use them because transition of stage count must involve
 * trace events (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
 * produce tracepoint events. It is provided just for making the intention
 * clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}
/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
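
/*
 * Note: like the kernel's time_after() family, the macros above compare
 * free-running 32-bit counters modulo 2^32, so the comparison stays
 * correct across wraparound.  Illustrative sketch: nilfs_cnt32_gt(1,
 * 0xffffffff) is true, because (__s32)0xffffffff - (__s32)1 == -2 < 0,
 * i.e. 1 is "after" 0xffffffff when the counter wraps.
 */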
static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If journal_info field is occupied by other FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if capacity
 * is running low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}
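
/*
 * Illustrative sketch (a hypothetical caller, not taken from this file):
 * the begin/commit pair brackets an indivisible set of updates.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (unlikely(err))
 *		return err;
 *	...update inodes, directory entries, bmaps...
 *	err = nilfs_transaction_commit(sb);
 *
 * On failure inside the region, nilfs_transaction_abort() below must be
 * called instead of nilfs_transaction_commit().
 */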
void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}
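
/*
 * nilfs_segctor_map_segsum_entry - map the next @bytes of segment summary
 * space and advance the cursor @ssp, stepping to the next summary buffer
 * when the entry would cross a block boundary.
 */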
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough relative to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));

	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));

	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));

	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));

	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
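
/*
 * Note on the three operation tables above: they select how collected
 * blocks are described in the segment summary.  Broadly, regular files
 * record DAT-translated virtual addresses (nilfs_binfo_v for data,
 * virtual block numbers for nodes), the DAT itself records untranslated
 * entries (block offsets for data, nilfs_binfo_dat for nodes), and the
 * dsync table leaves the node/bmap collectors NULL because data-sync
 * logs carry data blocks only.
 */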
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, i_blocksize(inode), 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated with cpfile.  But, it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}
static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}
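
/*
 * nilfs_segctor_update_segusage - write the number of live blocks and the
 * log creation time into the sufile entry of every segment on the current
 * write list.
 */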
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}
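
/*
 * nilfs_cancel_segusage - undo the segment usage updates for logs whose
 * write was aborted, resetting the first segment's usage to its pre-write
 * block count and clearing that of the following segments.
 */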
static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}
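
/*
 * nilfs_segctor_truncate_segments - drop the segment buffers following
 * @last from the write list, releasing their preallocated next segments
 * back to the sufile.
 */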
static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
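
/*
 * nilfs_segctor_collect - run the block collection stages, extending the
 * segment buffer chain and retrying from the saved stage whenever the
 * current segments fill up (-E2BIG).
 */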
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* does not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_page->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}
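
/*
 * nilfs_segctor_assign - assign on-disk block addresses and binfo entries
 * for the payload blocks of every segment buffer, then complete the
 * segment summaries.
 */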
static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}
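
/*
 * nilfs_segctor_prepare_write - set up pages for write-out: segment summary
 * (and super root) pages are put under writeback directly, payload buffers
 * are flagged for async write, and file pages are started through
 * nilfs_begin_page_io().
 */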
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}
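
/*
 * nilfs_end_page_io - finish the writeback of a page; redirties the page
 * if it still has dirty buffers or if the write failed, with special
 * handling for pages holding b-tree node buffers.
 */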
static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}
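
/*
 * nilfs_abort_logs - terminate the writeback of the pages belonging to
 * @logs after a failed write, propagating @err to the file pages.
 */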
static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* does not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}
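
/*
 * nilfs_segctor_complete_write - finalize a successful write: clean the
 * written buffers, end page writeback, drop collected inodes, and advance
 * the segment cursor (and the last-segment position if a super root was
 * written).
 */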
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages are
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_msg(sci->sc_super, KERN_WARNING,
					  "log writer: error %d getting inode block (ino=%lu)",
					  err, ii->vfs_inode.i_ino);
				return err;
			}
			mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}
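
/*
 * nilfs_segctor_drop_written_files - release inodes that were fully
 * written out; calling iput() is deferred to a work item in contexts
 * where a direct call could deadlock.
 */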
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

struct nilfs_segctor_wait_request {
	wait_queue_entry_t	wq;
	__u32			seq;
	int			err;
	atomic_t		done;
};
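
/*
 * nilfs_segctor_sync - submit a log-write request and wait until the
 * segment constructor thread has processed it or a signal arrives.
 */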
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}
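
/*
 * nilfs_segctor_wakeup - complete the wait requests whose sequence number
 * has been reached by sc_seq_done, passing @err to the waiters.
 */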
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
		iput(&ii->vfs_inode);
	}
}

int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			nilfs_msg(sb, KERN_WARNING,
				  "error %d on discard request, turning discards off for the device",
				  ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}
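
/*
 * nilfs_segctor_flush_mode - choose the log forming mode for a flush:
 * a plain file or DAT flush while the current logical segment is still
 * young, otherwise a full construction with super root (SC_LSEG_SR).
 */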
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_msg(sci->sc_super, KERN_INFO,
		  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_msg(sci->sc_super, KERN_ERR,
			  "error %d creating segctord thread", err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
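
/*
 * nilfs_segctor_new - allocate and initialize a segment constructor
 * object for @sb, taking a reference on @root.  Returns NULL on
 * allocation failure.
 */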
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_log_writer(sb);
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (err) {
		kfree(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_msg(sb, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
}