fs-writeback.c

  1. /*
  2. * fs/fs-writeback.c
  3. *
  4. * Copyright (C) 2002, Linus Torvalds.
  5. *
  6. * Contains all the functions related to writing back and waiting
  7. * upon dirty inodes against superblocks, and writing back dirty
  8. * pages against inodes, i.e. data writeback. Writeout of the
  9. * inode itself is not handled here.
  10. *
  11. * 10Apr2002 Andrew Morton
  12. * Split out of fs/inode.c
  13. * Additions for address_space-based writeback
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/export.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/slab.h>
  19. #include <linux/sched.h>
  20. #include <linux/fs.h>
  21. #include <linux/mm.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/kthread.h>
  24. #include <linux/writeback.h>
  25. #include <linux/blkdev.h>
  26. #include <linux/backing-dev.h>
  27. #include <linux/tracepoint.h>
  28. #include <linux/device.h>
  29. #include <linux/memcontrol.h>
  30. #include "internal.h"
  31. /*
  32. * 4MB minimal write chunk size
  33. */
  34. #define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
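/*
 * Rough arithmetic sketch, assuming 4KB pages (PAGE_CACHE_SHIFT == 12):
 * 4096UL >> (12 - 10) == 1024 pages, and 1024 pages * 4KB == 4MB,
 * matching the "4MB minimal write chunk size" above.
 */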
  35. struct wb_completion {
  36. atomic_t cnt;
  37. };
  38. /*
  39. * Passed into wb_writeback(), essentially a subset of writeback_control
  40. */
  41. struct wb_writeback_work {
  42. long nr_pages;
  43. struct super_block *sb;
  44. unsigned long *older_than_this;
  45. enum writeback_sync_modes sync_mode;
  46. unsigned int tagged_writepages:1;
  47. unsigned int for_kupdate:1;
  48. unsigned int range_cyclic:1;
  49. unsigned int for_background:1;
  50. unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
  51. unsigned int auto_free:1; /* free on completion */
  52. enum wb_reason reason; /* why was writeback initiated? */
  53. struct list_head list; /* pending work list */
  54. struct wb_completion *done; /* set if the caller waits */
  55. };
  56. /*
  57. * If one wants to wait for one or more wb_writeback_works, each work's
  58. * ->done should be set to a wb_completion defined using the following
  59. * macro. Once all work items are issued with wb_queue_work(), the caller
  60. * can wait for the completion of all using wb_wait_for_completion(). Work
  61. * items which are waited upon aren't freed automatically on completion.
  62. */
  63. #define DEFINE_WB_COMPLETION_ONSTACK(cmpl) \
  64. struct wb_completion cmpl = { \
  65. .cnt = ATOMIC_INIT(1), \
  66. }
  67. /*
  68. * If an inode is constantly having its pages dirtied, but then the
  69. * updates stop dirtytime_expire_interval seconds in the past, it's
  70. * possible for the worst case time between when an inode has its
  71. * timestamps updated and when they finally get written out to be two
  72. * dirtytime_expire_intervals. We set the default to 12 hours (in
  73. * seconds), which means most of the time inodes will have their
  74. * timestamps written to disk after 12 hours, but in the worst case a
  75. * few inodes might not have their timestamps updated for 24 hours.
  76. */
  77. unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  78. static inline struct inode *wb_inode(struct list_head *head)
  79. {
  80. return list_entry(head, struct inode, i_io_list);
  81. }
  82. /*
  83. * Include the creation of the trace points after defining the
  84. * wb_writeback_work structure and inline functions so that the definition
  85. * remains local to this file.
  86. */
  87. #define CREATE_TRACE_POINTS
  88. #include <trace/events/writeback.h>
  89. EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  90. static bool wb_io_lists_populated(struct bdi_writeback *wb)
  91. {
  92. if (wb_has_dirty_io(wb)) {
  93. return false;
  94. } else {
  95. set_bit(WB_has_dirty_io, &wb->state);
  96. WARN_ON_ONCE(!wb->avg_write_bandwidth);
  97. atomic_long_add(wb->avg_write_bandwidth,
  98. &wb->bdi->tot_write_bandwidth);
  99. return true;
  100. }
  101. }
  102. static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  103. {
  104. if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
  105. list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
  106. clear_bit(WB_has_dirty_io, &wb->state);
  107. WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
  108. &wb->bdi->tot_write_bandwidth) < 0);
  109. }
  110. }
  111. /**
  112. * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
  113. * @inode: inode to be moved
  114. * @wb: target bdi_writeback
  115. * @head: one of @wb->b_{dirty|io|more_io}
  116. *
  117. * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
  118. * Returns %true if @inode is the first occupant of the !dirty_time IO
  119. * lists; otherwise, %false.
  120. */
  121. static bool inode_io_list_move_locked(struct inode *inode,
  122. struct bdi_writeback *wb,
  123. struct list_head *head)
  124. {
  125. assert_spin_locked(&wb->list_lock);
  126. list_move(&inode->i_io_list, head);
  127. /* dirty_time doesn't count as dirty_io until expiration */
  128. if (head != &wb->b_dirty_time)
  129. return wb_io_lists_populated(wb);
  130. wb_io_lists_depopulated(wb);
  131. return false;
  132. }
  133. /**
  134. * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
  135. * @inode: inode to be removed
  136. * @wb: bdi_writeback @inode is being removed from
  137. *
  138. * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
  139. * clear %WB_has_dirty_io if all are empty afterwards.
  140. */
  141. static void inode_io_list_del_locked(struct inode *inode,
  142. struct bdi_writeback *wb)
  143. {
  144. assert_spin_locked(&wb->list_lock);
  145. list_del_init(&inode->i_io_list);
  146. wb_io_lists_depopulated(wb);
  147. }
  148. static void wb_wakeup(struct bdi_writeback *wb)
  149. {
  150. spin_lock_bh(&wb->work_lock);
  151. if (test_bit(WB_registered, &wb->state))
  152. mod_delayed_work(bdi_wq, &wb->dwork, 0);
  153. spin_unlock_bh(&wb->work_lock);
  154. }
  155. static void wb_queue_work(struct bdi_writeback *wb,
  156. struct wb_writeback_work *work)
  157. {
  158. trace_writeback_queue(wb, work);
  159. spin_lock_bh(&wb->work_lock);
  160. if (!test_bit(WB_registered, &wb->state))
  161. goto out_unlock;
  162. if (work->done)
  163. atomic_inc(&work->done->cnt);
  164. list_add_tail(&work->list, &wb->work_list);
  165. mod_delayed_work(bdi_wq, &wb->dwork, 0);
  166. out_unlock:
  167. spin_unlock_bh(&wb->work_lock);
  168. }
  169. /**
  170. * wb_wait_for_completion - wait for completion of bdi_writeback_works
  171. * @bdi: bdi work items were issued to
  172. * @done: target wb_completion
  173. *
  174. * Wait for one or more work items issued to @bdi with their ->done field
  175. * set to @done, which should have been defined with
  176. * DEFINE_WB_COMPLETION_ONSTACK(). This function returns after all such
  177. * work items are completed. Work items which are waited upon aren't freed
  178. * automatically on completion.
  179. */
  180. static void wb_wait_for_completion(struct backing_dev_info *bdi,
  181. struct wb_completion *done)
  182. {
  183. atomic_dec(&done->cnt); /* put down the initial count */
  184. wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
  185. }
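/*
 * Minimal usage sketch, mirroring the fallback path in
 * bdi_split_work_to_wbs() below: issue one or more works with ->done
 * pointing at an on-stack completion, then wait for all of them.
 *
 *	DEFINE_WB_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = { ... };
 *
 *	work.done = &done;
 *	wb_queue_work(wb, &work);
 *	wb_wait_for_completion(bdi, &done);
 */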
  186. #ifdef CONFIG_CGROUP_WRITEBACK
  187. /* parameters for foreign inode detection, see wb_detach_inode() */
  188. #define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, up to 8 secs w/ 16bit */
  189. #define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */
  190. #define WB_FRN_TIME_CUT_DIV 2 /* ignore rounds < avg / 2 */
  191. #define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */
  192. #define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
  193. #define WB_FRN_HIST_UNIT (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
  194. /* each slot's duration is 2s / 16 */
  195. #define WB_FRN_HIST_THR_SLOTS (WB_FRN_HIST_SLOTS / 2)
  196. /* if foreign slots >= 8, switch */
  197. #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
  198. /* one round can affect up to 5 slots */
  199. void __inode_attach_wb(struct inode *inode, struct page *page)
  200. {
  201. struct backing_dev_info *bdi = inode_to_bdi(inode);
  202. struct bdi_writeback *wb = NULL;
  203. if (inode_cgwb_enabled(inode)) {
  204. struct cgroup_subsys_state *memcg_css;
  205. if (page) {
  206. memcg_css = mem_cgroup_css_from_page(page);
  207. wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  208. } else {
  209. /* must pin memcg_css, see wb_get_create() */
  210. memcg_css = task_get_css(current, memory_cgrp_id);
  211. wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  212. css_put(memcg_css);
  213. }
  214. }
  215. if (!wb)
  216. wb = &bdi->wb;
  217. /*
  218. * There may be multiple instances of this function racing to
  219. * update the same inode. Use cmpxchg() to tell the winner.
  220. */
  221. if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
  222. wb_put(wb);
  223. }
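/*
 * Note on the cmpxchg() above: only the thread that swings i_wb from
 * NULL to its candidate installs the association; a loser sees a
 * non-NULL old value and simply releases its candidate with wb_put().
 * Generic sketch of the publish-once pattern:
 *
 *	if (cmpxchg(&ptr, NULL, mine))
 *		put(mine);		(somebody else won, drop ours)
 */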
  224. /**
  225. * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
  226. * @inode: inode of interest with i_lock held
  227. *
  228. * Returns @inode's wb with its list_lock held. @inode->i_lock must be
  229. * held on entry and is released on return. The returned wb is guaranteed
  230. * to stay @inode's associated wb until its list_lock is released.
  231. */
  232. static struct bdi_writeback *
  233. locked_inode_to_wb_and_lock_list(struct inode *inode)
  234. __releases(&inode->i_lock)
  235. __acquires(&wb->list_lock)
  236. {
  237. while (true) {
  238. struct bdi_writeback *wb = inode_to_wb(inode);
  239. /*
  240. * inode_to_wb() association is protected by both
  241. * @inode->i_lock and @wb->list_lock but list_lock nests
  242. * outside i_lock. Drop i_lock and verify that the
  243. * association hasn't changed after acquiring list_lock.
  244. */
  245. wb_get(wb);
  246. spin_unlock(&inode->i_lock);
  247. spin_lock(&wb->list_lock);
  248. wb_put(wb); /* not gonna deref it anymore */
  249. /* i_wb may have changed in between, can't use inode_to_wb() */
  250. if (likely(wb == inode->i_wb))
  251. return wb; /* @inode already has ref */
  252. spin_unlock(&wb->list_lock);
  253. cpu_relax();
  254. spin_lock(&inode->i_lock);
  255. }
  256. }
  257. /**
  258. * inode_to_wb_and_lock_list - determine an inode's wb and lock it
  259. * @inode: inode of interest
  260. *
  261. * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
  262. * on entry.
  263. */
  264. static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  265. __acquires(&wb->list_lock)
  266. {
  267. spin_lock(&inode->i_lock);
  268. return locked_inode_to_wb_and_lock_list(inode);
  269. }
  270. struct inode_switch_wbs_context {
  271. struct inode *inode;
  272. struct bdi_writeback *new_wb;
  273. struct rcu_head rcu_head;
  274. struct work_struct work;
  275. };
  276. static void inode_switch_wbs_work_fn(struct work_struct *work)
  277. {
  278. struct inode_switch_wbs_context *isw =
  279. container_of(work, struct inode_switch_wbs_context, work);
  280. struct inode *inode = isw->inode;
  281. struct address_space *mapping = inode->i_mapping;
  282. struct bdi_writeback *old_wb = inode->i_wb;
  283. struct bdi_writeback *new_wb = isw->new_wb;
  284. struct radix_tree_iter iter;
  285. bool switched = false;
  286. void **slot;
  287. /*
  288. * By the time control reaches here, RCU grace period has passed
  289. * since I_WB_SWITCH assertion and all wb stat update transactions
  290. * between unlocked_inode_to_wb_begin/end() are guaranteed to be
  291. * synchronizing against mapping->tree_lock.
  292. *
  293. * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
  294. * gives us exclusion against all wb related operations on @inode
  295. * including IO list manipulations and stat updates.
  296. */
  297. if (old_wb < new_wb) {
  298. spin_lock(&old_wb->list_lock);
  299. spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
  300. } else {
  301. spin_lock(&new_wb->list_lock);
  302. spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
  303. }
  304. spin_lock(&inode->i_lock);
  305. spin_lock_irq(&mapping->tree_lock);
  306. /*
  307. * Once I_FREEING is visible under i_lock, the eviction path owns
  308. * the inode and we shouldn't modify ->i_io_list.
  309. */
  310. if (unlikely(inode->i_state & I_FREEING))
  311. goto skip_switch;
  312. /*
  313. * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
  314. * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
  315. * pages actually under writeback.
  316. */
  317. radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
  318. PAGECACHE_TAG_DIRTY) {
  319. struct page *page = radix_tree_deref_slot_protected(slot,
  320. &mapping->tree_lock);
  321. if (likely(page) && PageDirty(page)) {
  322. __dec_wb_stat(old_wb, WB_RECLAIMABLE);
  323. __inc_wb_stat(new_wb, WB_RECLAIMABLE);
  324. }
  325. }
  326. radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
  327. PAGECACHE_TAG_WRITEBACK) {
  328. struct page *page = radix_tree_deref_slot_protected(slot,
  329. &mapping->tree_lock);
  330. if (likely(page)) {
  331. WARN_ON_ONCE(!PageWriteback(page));
  332. __dec_wb_stat(old_wb, WB_WRITEBACK);
  333. __inc_wb_stat(new_wb, WB_WRITEBACK);
  334. }
  335. }
  336. wb_get(new_wb);
  337. /*
  338. * Transfer to @new_wb's IO list if necessary. The specific list
  339. * @inode was on is ignored and the inode is put on ->b_dirty which
  340. * is always correct including from ->b_dirty_time. The transfer
  341. * preserves @inode->dirtied_when ordering.
  342. */
  343. if (!list_empty(&inode->i_io_list)) {
  344. struct inode *pos;
  345. inode_io_list_del_locked(inode, old_wb);
  346. inode->i_wb = new_wb;
  347. list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
  348. if (time_after_eq(inode->dirtied_when,
  349. pos->dirtied_when))
  350. break;
  351. inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
  352. } else {
  353. inode->i_wb = new_wb;
  354. }
  355. /* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
  356. inode->i_wb_frn_winner = 0;
  357. inode->i_wb_frn_avg_time = 0;
  358. inode->i_wb_frn_history = 0;
  359. switched = true;
  360. skip_switch:
  361. /*
  362. * Paired with load_acquire in unlocked_inode_to_wb_begin() and
  363. * ensures that the new wb is visible if they see !I_WB_SWITCH.
  364. */
  365. smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
  366. spin_unlock_irq(&mapping->tree_lock);
  367. spin_unlock(&inode->i_lock);
  368. spin_unlock(&new_wb->list_lock);
  369. spin_unlock(&old_wb->list_lock);
  370. if (switched) {
  371. wb_wakeup(new_wb);
  372. wb_put(old_wb);
  373. }
  374. wb_put(new_wb);
  375. iput(inode);
  376. kfree(isw);
  377. }
  378. static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
  379. {
  380. struct inode_switch_wbs_context *isw = container_of(rcu_head,
  381. struct inode_switch_wbs_context, rcu_head);
  382. /* needs to grab bh-unsafe locks, bounce to work item */
  383. INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
  384. schedule_work(&isw->work);
  385. }
  386. /**
  387. * inode_switch_wbs - change the wb association of an inode
  388. * @inode: target inode
  389. * @new_wb_id: ID of the new wb
  390. *
  391. * Switch @inode's wb association to the wb identified by @new_wb_id. The
  392. * switching is performed asynchronously and may fail silently.
  393. */
  394. static void inode_switch_wbs(struct inode *inode, int new_wb_id)
  395. {
  396. struct backing_dev_info *bdi = inode_to_bdi(inode);
  397. struct cgroup_subsys_state *memcg_css;
  398. struct inode_switch_wbs_context *isw;
  399. /* noop if seems to be already in progress */
  400. if (inode->i_state & I_WB_SWITCH)
  401. return;
  402. isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
  403. if (!isw)
  404. return;
  405. /* find and pin the new wb */
  406. rcu_read_lock();
  407. memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
  408. if (memcg_css)
  409. isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  410. rcu_read_unlock();
  411. if (!isw->new_wb)
  412. goto out_free;
  413. /* while holding I_WB_SWITCH, no one else can update the association */
  414. spin_lock(&inode->i_lock);
  415. if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
  416. inode_to_wb(inode) == isw->new_wb) {
  417. spin_unlock(&inode->i_lock);
  418. goto out_free;
  419. }
  420. inode->i_state |= I_WB_SWITCH;
  421. spin_unlock(&inode->i_lock);
  422. ihold(inode);
  423. isw->inode = inode;
  424. /*
  425. * In addition to synchronizing among switchers, I_WB_SWITCH tells
  426. * the RCU protected stat update paths to grab the mapping's
  427. * tree_lock so that stat transfer can synchronize against them.
  428. * Let's continue after I_WB_SWITCH is guaranteed to be visible.
  429. */
  430. call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
  431. return;
  432. out_free:
  433. if (isw->new_wb)
  434. wb_put(isw->new_wb);
  435. kfree(isw);
  436. }
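/*
 * Rough summary of the switch protocol set up above and completed in
 * inode_switch_wbs_work_fn(): I_WB_SWITCH is set under i_lock, an RCU
 * grace period is waited out via call_rcu(), and only then are stats
 * and IO list membership transferred.  Note also that the work fn takes
 * the two list_locks in address order so concurrent switches between
 * the same pair of wbs cannot deadlock:
 *
 *	if (old_wb < new_wb) {
 *		spin_lock(&old_wb->list_lock);
 *		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 *	} else {
 *		spin_lock(&new_wb->list_lock);
 *		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 *	}
 */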
  437. /**
  438. * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
  439. * @wbc: writeback_control of interest
  440. * @inode: target inode
  441. *
  442. * @inode is locked and about to be written back under the control of @wbc.
  443. * Record @inode's writeback context into @wbc and unlock the i_lock. On
  444. * writeback completion, wbc_detach_inode() should be called. This is used
  445. * to track the cgroup writeback context.
  446. */
  447. void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
  448. struct inode *inode)
  449. {
  450. if (!inode_cgwb_enabled(inode)) {
  451. spin_unlock(&inode->i_lock);
  452. return;
  453. }
  454. wbc->wb = inode_to_wb(inode);
  455. wbc->inode = inode;
  456. wbc->wb_id = wbc->wb->memcg_css->id;
  457. wbc->wb_lcand_id = inode->i_wb_frn_winner;
  458. wbc->wb_tcand_id = 0;
  459. wbc->wb_bytes = 0;
  460. wbc->wb_lcand_bytes = 0;
  461. wbc->wb_tcand_bytes = 0;
  462. wb_get(wbc->wb);
  463. spin_unlock(&inode->i_lock);
  464. /*
  465. * A dying wb indicates that the memcg-blkcg mapping has changed
  466. * and a new wb is already serving the memcg. Switch immediately.
  467. */
  468. if (unlikely(wb_dying(wbc->wb)))
  469. inode_switch_wbs(inode, wbc->wb_id);
  470. }
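/*
 * Minimal pairing sketch, mirroring writeback_single_inode() below: the
 * attach consumes i_lock, the detach runs foreign-inode detection and
 * drops the wb reference.
 *
 *	spin_lock(&inode->i_lock);
 *	inode->i_state |= I_SYNC;
 *	wbc_attach_and_unlock_inode(&wbc, inode);
 *	__writeback_single_inode(inode, &wbc);
 *	wbc_detach_inode(&wbc);
 */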
  471. /**
  472. * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
  473. * @wbc: writeback_control of the just finished writeback
  474. *
  475. * To be called after a writeback attempt of an inode finishes and undoes
  476. * wbc_attach_and_unlock_inode(). Can be called under any context.
  477. *
  478. * As concurrent write sharing of an inode is expected to be very rare and
  479. * memcg only tracks page ownership on a first-use basis, severely confining
  480. * the usefulness of such sharing, cgroup writeback tracks ownership
  481. * per-inode. While the support for concurrent write sharing of an inode
  482. * is deemed unnecessary, an inode being written to by different cgroups at
  483. * different points in time is a lot more common, and, more importantly,
  484. * charging only by first-use can too readily lead to grossly incorrect
  485. * behaviors (single foreign page can lead to gigabytes of writeback to be
  486. * incorrectly attributed).
  487. *
  488. * To resolve this issue, cgroup writeback detects the majority dirtier of
  489. * an inode and transfers the ownership to it. To avoid unnecessary
  490. * oscillation, the detection mechanism keeps track of history and gives
  491. * out the switch verdict only if the foreign usage pattern is stable over
  492. * a certain amount of time and/or writeback attempts.
  493. *
  494. * On each writeback attempt, @wbc tries to detect the majority writer
  495. * using Boyer-Moore majority vote algorithm. In addition to the byte
  496. * count from the majority voting, it also counts the bytes written for the
  497. * current wb and the last round's winner wb (max of last round's current
  498. * wb, the winner from two rounds ago, and the last round's majority
  499. * candidate). Keeping track of the historical winner helps the algorithm
  500. * to semi-reliably detect the most active writer even when it's not the
  501. * absolute majority.
  502. *
  503. * Once the winner of the round is determined, whether the winner is
  504. * foreign or not and how much IO time the round consumed is recorded in
  505. * inode->i_wb_frn_history. If the amount of recorded foreign IO time is
  506. * over a certain threshold, the switch verdict is given.
  507. */
  508. void wbc_detach_inode(struct writeback_control *wbc)
  509. {
  510. struct bdi_writeback *wb = wbc->wb;
  511. struct inode *inode = wbc->inode;
  512. unsigned long avg_time, max_bytes, max_time;
  513. u16 history;
  514. int max_id;
  515. if (!wb)
  516. return;
  517. history = inode->i_wb_frn_history;
  518. avg_time = inode->i_wb_frn_avg_time;
  519. /* pick the winner of this round */
  520. if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
  521. wbc->wb_bytes >= wbc->wb_tcand_bytes) {
  522. max_id = wbc->wb_id;
  523. max_bytes = wbc->wb_bytes;
  524. } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
  525. max_id = wbc->wb_lcand_id;
  526. max_bytes = wbc->wb_lcand_bytes;
  527. } else {
  528. max_id = wbc->wb_tcand_id;
  529. max_bytes = wbc->wb_tcand_bytes;
  530. }
  531. /*
  532. * Calculate the amount of IO time the winner consumed and fold it
  533. * into the running average kept per inode. If the consumed IO
  534. * time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
  535. * deciding whether to switch or not. This is to prevent one-off
  536. * small dirtiers from skewing the verdict.
  537. */
  538. max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
  539. wb->avg_write_bandwidth);
  540. if (avg_time)
  541. avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
  542. (avg_time >> WB_FRN_TIME_AVG_SHIFT);
  543. else
  544. avg_time = max_time; /* immediate catch up on first run */
  545. if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
  546. int slots;
  547. /*
  548. * The switch verdict is reached if foreign wb's consume
  549. * more than a certain proportion of IO time in a
  550. * WB_FRN_TIME_PERIOD. This is loosely tracked by 16 slot
  551. * history mask where each bit represents one sixteenth of
  552. * the period. Determine the number of slots to shift into
  553. * history from @max_time.
  554. */
  555. slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
  556. (unsigned long)WB_FRN_HIST_MAX_SLOTS);
  557. history <<= slots;
  558. if (wbc->wb_id != max_id)
  559. history |= (1U << slots) - 1;
  560. /*
  561. * Switch if the current wb isn't the consistent winner.
  562. * If there are multiple closely competing dirtiers, the
  563. * inode may switch across them repeatedly over time, which
  564. * is okay. The main goal is avoiding keeping an inode on
  565. * the wrong wb for an extended period of time.
  566. */
  567. if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
  568. inode_switch_wbs(inode, max_id);
  569. }
  570. /*
  571. * Multiple instances of this function may race to update the
  572. * following fields but we don't mind occasional inaccuracies.
  573. */
  574. inode->i_wb_frn_winner = max_id;
  575. inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
  576. inode->i_wb_frn_history = history;
  577. wb_put(wbc->wb);
  578. wbc->wb = NULL;
  579. }
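/*
 * Worked example of the history mechanism above: suppose this round's
 * winner consumed roughly 3/16th of WB_FRN_TIME_PERIOD worth of IO
 * time, i.e. 3 slots, and the winner was foreign:
 *
 *	slots = 3;
 *	history <<= slots;		(age out old rounds)
 *	history |= (1U << slots) - 1;	(record 3 foreign slots)
 *
 * Once foreign rounds occupy more than WB_FRN_HIST_THR_SLOTS (8) of the
 * 16 slots, hweight32(history) crosses the threshold and
 * inode_switch_wbs() is called.
 */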
  580. /**
  581. * wbc_account_io - account IO issued during writeback
  582. * @wbc: writeback_control of the writeback in progress
  583. * @page: page being written out
  584. * @bytes: number of bytes being written out
  585. *
  586. * @bytes from @page are about to be written out during the writeback
  587. * controlled by @wbc. Keep the book for foreign inode detection. See
  588. * wbc_detach_inode().
  589. */
  590. void wbc_account_io(struct writeback_control *wbc, struct page *page,
  591. size_t bytes)
  592. {
  593. int id;
  594. /*
  595. * pageout() path doesn't attach @wbc to the inode being written
  596. * out. This is intentional as we don't want the function to block
  597. * behind a slow cgroup. Ultimately, we want pageout() to kick off
  598. * regular writeback instead of writing things out itself.
  599. */
  600. if (!wbc->wb)
  601. return;
  602. id = mem_cgroup_css_from_page(page)->id;
  603. if (id == wbc->wb_id) {
  604. wbc->wb_bytes += bytes;
  605. return;
  606. }
  607. if (id == wbc->wb_lcand_id)
  608. wbc->wb_lcand_bytes += bytes;
  609. /* Boyer-Moore majority vote algorithm */
  610. if (!wbc->wb_tcand_bytes)
  611. wbc->wb_tcand_id = id;
  612. if (id == wbc->wb_tcand_id)
  613. wbc->wb_tcand_bytes += bytes;
  614. else
  615. wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
  616. }
  617. EXPORT_SYMBOL_GPL(wbc_account_io);
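/*
 * Example of the Boyer-Moore step in wbc_account_io(): for foreign
 * pages owned by cgroups A A B A C (equal @bytes each, none of them the
 * current wb or last round's winner), wb_tcand_id/wb_tcand_bytes evolve
 * as A:1, A:2, A:1 (B cancels one), A:2, A:1 (C cancels one), leaving A
 * as the majority candidate.  The candidate only matters relative to
 * wb_bytes and wb_lcand_bytes when the round is scored in
 * wbc_detach_inode().
 */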
  618. /**
  619. * inode_congested - test whether an inode is congested
  620. * @inode: inode to test for congestion (may be NULL)
  621. * @cong_bits: mask of WB_[a]sync_congested bits to test
  622. *
  623. * Tests whether @inode is congested. @cong_bits is the mask of congestion
  624. * bits to test and the return value is the mask of set bits.
  625. *
  626. * If cgroup writeback is enabled for @inode, the congestion state is
  627. * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
  628. * associated with @inode is congested; otherwise, the root wb's congestion
  629. * state is used.
  630. *
  631. * @inode is allowed to be NULL as this function is often called on
  632. * mapping->host which is NULL for the swapper space.
  633. */
  634. int inode_congested(struct inode *inode, int cong_bits)
  635. {
  636. /*
  637. * Once set, ->i_wb never becomes NULL while the inode is alive.
  638. * Start transaction iff ->i_wb is visible.
  639. */
  640. if (inode && inode_to_wb_is_valid(inode)) {
  641. struct bdi_writeback *wb;
  642. bool locked, congested;
  643. wb = unlocked_inode_to_wb_begin(inode, &locked);
  644. congested = wb_congested(wb, cong_bits);
  645. unlocked_inode_to_wb_end(inode, locked);
  646. return congested;
  647. }
  648. return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
  649. }
  650. EXPORT_SYMBOL_GPL(inode_congested);
  651. /**
  652. * wb_split_bdi_pages - split nr_pages to write according to bandwidth
  653. * @wb: target bdi_writeback to split @nr_pages to
  654. * @nr_pages: number of pages to write for the whole bdi
  655. *
  656. * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
  657. * relation to the total write bandwidth of all wb's w/ dirty inodes on
  658. * @wb->bdi.
  659. */
  660. static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  661. {
  662. unsigned long this_bw = wb->avg_write_bandwidth;
  663. unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
  664. if (nr_pages == LONG_MAX)
  665. return LONG_MAX;
  666. /*
  667. * This may be called on clean wb's and proportional distribution
  668. * may not make sense, just use the original @nr_pages in those
  669. * cases. In general, we wanna err on the side of writing more.
  670. */
  671. if (!tot_bw || this_bw >= tot_bw)
  672. return nr_pages;
  673. else
  674. return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
  675. }
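/*
 * Worked example: if @wb's avg_write_bandwidth is 100 and the bdi-wide
 * tot_write_bandwidth is 400, a request for 1024 pages is scaled to
 * DIV_ROUND_UP_ULL(1024 * 100, 400) == 256 pages for this wb.  LONG_MAX
 * requests and the clean-bdi corner cases bypass the scaling as noted
 * above.
 */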
  676. /**
  677. * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
  678. * @bdi: target backing_dev_info
  679. * @base_work: wb_writeback_work to issue
  680. * @skip_if_busy: skip wb's which already have writeback in progress
  681. *
  682. * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
  683. * have dirty inodes. If @base_work->nr_pages isn't %LONG_MAX, it's
  684. * distributed to the busy wbs according to each wb's proportion in the
  685. * total active write bandwidth of @bdi.
  686. */
  687. static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  688. struct wb_writeback_work *base_work,
  689. bool skip_if_busy)
  690. {
  691. struct bdi_writeback *last_wb = NULL;
  692. struct bdi_writeback *wb = list_entry(&bdi->wb_list,
  693. struct bdi_writeback, bdi_node);
  694. might_sleep();
  695. restart:
  696. rcu_read_lock();
  697. list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
  698. DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
  699. struct wb_writeback_work fallback_work;
  700. struct wb_writeback_work *work;
  701. long nr_pages;
  702. if (last_wb) {
  703. wb_put(last_wb);
  704. last_wb = NULL;
  705. }
  706. /* SYNC_ALL writes out I_DIRTY_TIME too */
  707. if (!wb_has_dirty_io(wb) &&
  708. (base_work->sync_mode == WB_SYNC_NONE ||
  709. list_empty(&wb->b_dirty_time)))
  710. continue;
  711. if (skip_if_busy && writeback_in_progress(wb))
  712. continue;
  713. nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
  714. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  715. if (work) {
  716. *work = *base_work;
  717. work->nr_pages = nr_pages;
  718. work->auto_free = 1;
  719. wb_queue_work(wb, work);
  720. continue;
  721. }
  722. /* alloc failed, execute synchronously using on-stack fallback */
  723. work = &fallback_work;
  724. *work = *base_work;
  725. work->nr_pages = nr_pages;
  726. work->auto_free = 0;
  727. work->done = &fallback_work_done;
  728. wb_queue_work(wb, work);
  729. /*
  730. * Pin @wb so that it stays on @bdi->wb_list. This allows
  731. * continuing iteration from @wb after dropping and
  732. * regrabbing rcu read lock.
  733. */
  734. wb_get(wb);
  735. last_wb = wb;
  736. rcu_read_unlock();
  737. wb_wait_for_completion(bdi, &fallback_work_done);
  738. goto restart;
  739. }
  740. rcu_read_unlock();
  741. if (last_wb)
  742. wb_put(last_wb);
  743. }
  744. #else /* CONFIG_CGROUP_WRITEBACK */
  745. static struct bdi_writeback *
  746. locked_inode_to_wb_and_lock_list(struct inode *inode)
  747. __releases(&inode->i_lock)
  748. __acquires(&wb->list_lock)
  749. {
  750. struct bdi_writeback *wb = inode_to_wb(inode);
  751. spin_unlock(&inode->i_lock);
  752. spin_lock(&wb->list_lock);
  753. return wb;
  754. }
  755. static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  756. __acquires(&wb->list_lock)
  757. {
  758. struct bdi_writeback *wb = inode_to_wb(inode);
  759. spin_lock(&wb->list_lock);
  760. return wb;
  761. }
  762. static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  763. {
  764. return nr_pages;
  765. }
  766. static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  767. struct wb_writeback_work *base_work,
  768. bool skip_if_busy)
  769. {
  770. might_sleep();
  771. if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
  772. base_work->auto_free = 0;
  773. wb_queue_work(&bdi->wb, base_work);
  774. }
  775. }
  776. #endif /* CONFIG_CGROUP_WRITEBACK */
  777. void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
  778. bool range_cyclic, enum wb_reason reason)
  779. {
  780. struct wb_writeback_work *work;
  781. if (!wb_has_dirty_io(wb))
  782. return;
  783. /*
  784. * This is WB_SYNC_NONE writeback, so if allocation fails just
  785. * wakeup the thread for old dirty data writeback
  786. */
  787. work = kzalloc(sizeof(*work), GFP_ATOMIC);
  788. if (!work) {
  789. trace_writeback_nowork(wb);
  790. wb_wakeup(wb);
  791. return;
  792. }
  793. work->sync_mode = WB_SYNC_NONE;
  794. work->nr_pages = nr_pages;
  795. work->range_cyclic = range_cyclic;
  796. work->reason = reason;
  797. work->auto_free = 1;
  798. wb_queue_work(wb, work);
  799. }
  800. /**
  801. * wb_start_background_writeback - start background writeback
  802. * @wb: bdi_writeback to write from
  803. *
  804. * Description:
  805. * This makes sure WB_SYNC_NONE background writeback happens. When
  806. * this function returns, it is only guaranteed that for given wb
  807. * some IO is happening if we are over background dirty threshold.
  808. * Caller need not hold sb s_umount semaphore.
  809. */
  810. void wb_start_background_writeback(struct bdi_writeback *wb)
  811. {
  812. /*
  813. * We just wake up the flusher thread. It will perform background
  814. * writeback as soon as there is no other work to do.
  815. */
  816. trace_writeback_wake_background(wb);
  817. wb_wakeup(wb);
  818. }
  819. /*
  820. * Remove the inode from the writeback list it is on.
  821. */
  822. void inode_io_list_del(struct inode *inode)
  823. {
  824. struct bdi_writeback *wb;
  825. wb = inode_to_wb_and_lock_list(inode);
  826. inode_io_list_del_locked(inode, wb);
  827. spin_unlock(&wb->list_lock);
  828. }
  829. /*
  830. * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
  831. * furthest end of its superblock's dirty-inode list.
  832. *
  833. * Before stamping the inode's ->dirtied_when, we check to see whether it is
  834. * already the most-recently-dirtied inode on the b_dirty list. If that is
  835. * the case then the inode must have been redirtied while it was being written
  836. * out and we don't reset its dirtied_when.
  837. */
  838. static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
  839. {
  840. if (!list_empty(&wb->b_dirty)) {
  841. struct inode *tail;
  842. tail = wb_inode(wb->b_dirty.next);
  843. if (time_before(inode->dirtied_when, tail->dirtied_when))
  844. inode->dirtied_when = jiffies;
  845. }
  846. inode_io_list_move_locked(inode, wb, &wb->b_dirty);
  847. }
  848. /*
  849. * requeue inode for re-scanning after bdi->b_io list is exhausted.
  850. */
  851. static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
  852. {
  853. inode_io_list_move_locked(inode, wb, &wb->b_more_io);
  854. }
  855. static void inode_sync_complete(struct inode *inode)
  856. {
  857. inode->i_state &= ~I_SYNC;
  858. /* If inode is clean and unused, put it into LRU now... */
  859. inode_add_lru(inode);
  860. /* Waiters must see I_SYNC cleared before being woken up */
  861. smp_mb();
  862. wake_up_bit(&inode->i_state, __I_SYNC);
  863. }
  864. static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  865. {
  866. bool ret = time_after(inode->dirtied_when, t);
  867. #ifndef CONFIG_64BIT
  868. /*
  869. * For inodes being constantly redirtied, dirtied_when can get stuck.
  870. * It _appears_ to be in the future, but is actually in distant past.
  871. * This test is necessary to prevent such wrapped-around relative times
  872. * from permanently stopping the whole bdi writeback.
  873. */
  874. ret = ret && time_before_eq(inode->dirtied_when, jiffies);
  875. #endif
  876. return ret;
  877. }
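/*
 * Example of the 32-bit guard above: with HZ == 1000, jiffies wraps
 * roughly every 49.7 days.  An inode carrying a stale dirtied_when from
 * before the wrap can compare as "after" the current time under
 * time_after() alone; the extra time_before_eq(dirtied_when, jiffies)
 * check rejects such wrapped values so expiry keeps making progress.
 */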
  878. #define EXPIRE_DIRTY_ATIME 0x0001
  879. /*
  880. * Move expired (dirtied before work->older_than_this) dirty inodes from
  881. * @delaying_queue to @dispatch_queue.
  882. */
  883. static int move_expired_inodes(struct list_head *delaying_queue,
  884. struct list_head *dispatch_queue,
  885. int flags,
  886. struct wb_writeback_work *work)
  887. {
  888. unsigned long *older_than_this = NULL;
  889. unsigned long expire_time;
  890. LIST_HEAD(tmp);
  891. struct list_head *pos, *node;
  892. struct super_block *sb = NULL;
  893. struct inode *inode;
  894. int do_sb_sort = 0;
  895. int moved = 0;
  896. if ((flags & EXPIRE_DIRTY_ATIME) == 0)
  897. older_than_this = work->older_than_this;
  898. else if (!work->for_sync) {
  899. expire_time = jiffies - (dirtytime_expire_interval * HZ);
  900. older_than_this = &expire_time;
  901. }
  902. while (!list_empty(delaying_queue)) {
  903. inode = wb_inode(delaying_queue->prev);
  904. if (older_than_this &&
  905. inode_dirtied_after(inode, *older_than_this))
  906. break;
  907. list_move(&inode->i_io_list, &tmp);
  908. moved++;
  909. if (flags & EXPIRE_DIRTY_ATIME)
  910. set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
  911. if (sb_is_blkdev_sb(inode->i_sb))
  912. continue;
  913. if (sb && sb != inode->i_sb)
  914. do_sb_sort = 1;
  915. sb = inode->i_sb;
  916. }
  917. /* just one sb in list, splice to dispatch_queue and we're done */
  918. if (!do_sb_sort) {
  919. list_splice(&tmp, dispatch_queue);
  920. goto out;
  921. }
  922. /* Move inodes from one superblock together */
  923. while (!list_empty(&tmp)) {
  924. sb = wb_inode(tmp.prev)->i_sb;
  925. list_for_each_prev_safe(pos, node, &tmp) {
  926. inode = wb_inode(pos);
  927. if (inode->i_sb == sb)
  928. list_move(&inode->i_io_list, dispatch_queue);
  929. }
  930. }
  931. out:
  932. return moved;
  933. }
  934. /*
  935. * Queue all expired dirty inodes for io, eldest first.
  936. * Before
  937. *        newly dirtied     b_dirty    b_io    b_more_io
  938. *        =============>    gf         edc     BA
  939. * After
  940. *        newly dirtied     b_dirty    b_io    b_more_io
  941. *        =============>    g          fBAedc
  942. *                                          |
  943. *                                          +--> dequeue for IO
  944. */
  945. static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
  946. {
  947. int moved;
  948. assert_spin_locked(&wb->list_lock);
  949. list_splice_init(&wb->b_more_io, &wb->b_io);
  950. moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
  951. moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
  952. EXPIRE_DIRTY_ATIME, work);
  953. if (moved)
  954. wb_io_lists_populated(wb);
  955. trace_writeback_queue_io(wb, work, moved);
  956. }
  957. static int write_inode(struct inode *inode, struct writeback_control *wbc)
  958. {
  959. int ret;
  960. if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
  961. trace_writeback_write_inode_start(inode, wbc);
  962. ret = inode->i_sb->s_op->write_inode(inode, wbc);
  963. trace_writeback_write_inode(inode, wbc);
  964. return ret;
  965. }
  966. return 0;
  967. }
  968. /*
  969. * Wait for writeback on an inode to complete. Called with i_lock held.
  970. * Caller must make sure inode cannot go away when we drop i_lock.
  971. */
  972. static void __inode_wait_for_writeback(struct inode *inode)
  973. __releases(inode->i_lock)
  974. __acquires(inode->i_lock)
  975. {
  976. DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
  977. wait_queue_head_t *wqh;
  978. wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
  979. while (inode->i_state & I_SYNC) {
  980. spin_unlock(&inode->i_lock);
  981. __wait_on_bit(wqh, &wq, bit_wait,
  982. TASK_UNINTERRUPTIBLE);
  983. spin_lock(&inode->i_lock);
  984. }
  985. }
  986. /*
  987. * Wait for writeback on an inode to complete. Caller must have inode pinned.
  988. */
  989. void inode_wait_for_writeback(struct inode *inode)
  990. {
  991. spin_lock(&inode->i_lock);
  992. __inode_wait_for_writeback(inode);
  993. spin_unlock(&inode->i_lock);
  994. }
  995. /*
  996. * Sleep until I_SYNC is cleared. This function must be called with i_lock
  997. * held and drops it. It is aimed for callers not holding any inode reference
  998. * so once i_lock is dropped, inode can go away.
  999. */
  1000. static void inode_sleep_on_writeback(struct inode *inode)
  1001. __releases(inode->i_lock)
  1002. {
  1003. DEFINE_WAIT(wait);
  1004. wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
  1005. int sleep;
  1006. prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
  1007. sleep = inode->i_state & I_SYNC;
  1008. spin_unlock(&inode->i_lock);
  1009. if (sleep)
  1010. schedule();
  1011. finish_wait(wqh, &wait);
  1012. }
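/*
 * Both waiters above pair with inode_sync_complete(), which clears
 * I_SYNC, issues smp_mb() and then wakes __I_SYNC waiters.  A waiter
 * must therefore re-check I_SYNC after waking, as in the loop of
 * __inode_wait_for_writeback():
 *
 *	while (inode->i_state & I_SYNC) {
 *		spin_unlock(&inode->i_lock);
 *		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
 *		spin_lock(&inode->i_lock);
 *	}
 */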
  1013. /*
  1014. * Find proper writeback list for the inode depending on its current state and
  1015. * possibly also change of its state while we were doing writeback. Here we
  1016. * handle things such as livelock prevention or fairness of writeback among
  1017. * inodes. This function can be called only by the flusher thread - no one else
  1018. * processes all inodes in writeback lists and requeueing inodes behind flusher
  1019. * thread's back can have unexpected consequences.
  1020. */
  1021. static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
  1022. struct writeback_control *wbc)
  1023. {
  1024. if (inode->i_state & I_FREEING)
  1025. return;
  1026. /*
  1027. * Sync livelock prevention. Each inode is tagged and synced in one
  1028. * shot. If still dirty, it will be redirty_tail()'ed below. Update
  1029. * the dirty time to prevent enqueue and sync it again.
  1030. */
  1031. if ((inode->i_state & I_DIRTY) &&
  1032. (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
  1033. inode->dirtied_when = jiffies;
  1034. if (wbc->pages_skipped) {
  1035. /*
  1036. * writeback is not making progress due to locked
  1037. * buffers. Skip this inode for now.
  1038. */
  1039. redirty_tail(inode, wb);
  1040. return;
  1041. }
  1042. if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
  1043. /*
  1044. * We didn't write back all the pages. nfs_writepages()
  1045. * sometimes bails out without doing anything.
  1046. */
  1047. if (wbc->nr_to_write <= 0) {
  1048. /* Slice used up. Queue for next turn. */
  1049. requeue_io(inode, wb);
  1050. } else {
  1051. /*
  1052. * Writeback blocked by something other than
  1053. * congestion. Delay the inode for some time to
  1054. * avoid spinning on the CPU (100% iowait)
  1055. * retrying writeback of the dirty page/inode
  1056. * that cannot be performed immediately.
  1057. */
  1058. redirty_tail(inode, wb);
  1059. }
  1060. } else if (inode->i_state & I_DIRTY) {
  1061. /*
  1062. * Filesystems can dirty the inode during writeback operations,
  1063. * such as delayed allocation during submission or metadata
  1064. * updates after data IO completion.
  1065. */
  1066. redirty_tail(inode, wb);
  1067. } else if (inode->i_state & I_DIRTY_TIME) {
  1068. inode->dirtied_when = jiffies;
  1069. inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
  1070. } else {
  1071. /* The inode is clean. Remove from writeback lists. */
  1072. inode_io_list_del_locked(inode, wb);
  1073. }
  1074. }
  1075. /*
  1076. * Write out an inode and its dirty pages. Do not update the writeback list
  1077. * linkage. That is left to the caller. The caller is also responsible for
  1078. * setting I_SYNC flag and calling inode_sync_complete() to clear it.
  1079. */
  1080. static int
  1081. __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  1082. {
  1083. struct address_space *mapping = inode->i_mapping;
  1084. long nr_to_write = wbc->nr_to_write;
  1085. unsigned dirty;
  1086. int ret;
  1087. WARN_ON(!(inode->i_state & I_SYNC));
  1088. trace_writeback_single_inode_start(inode, wbc, nr_to_write);
  1089. ret = do_writepages(mapping, wbc);
  1090. /*
  1091. * Make sure to wait on the data before writing out the metadata.
  1092. * This is important for filesystems that modify metadata on data
  1093. * I/O completion. We don't do it for sync(2) writeback because it has a
  1094. * separate, external IO completion path and ->sync_fs for guaranteeing
  1095. * inode metadata is written back correctly.
  1096. */
  1097. if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
  1098. int err = filemap_fdatawait(mapping);
  1099. if (ret == 0)
  1100. ret = err;
  1101. }
  1102. /*
  1103. * Some filesystems may redirty the inode during the writeback
  1104. * due to delalloc, clear dirty metadata flags right before
  1105. * write_inode()
  1106. */
  1107. spin_lock(&inode->i_lock);
  1108. dirty = inode->i_state & I_DIRTY;
  1109. if (inode->i_state & I_DIRTY_TIME) {
  1110. if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
  1111. unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
  1112. unlikely(time_after(jiffies,
  1113. (inode->dirtied_time_when +
  1114. dirtytime_expire_interval * HZ)))) {
  1115. dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
  1116. trace_writeback_lazytime(inode);
  1117. }
  1118. } else
  1119. inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
  1120. inode->i_state &= ~dirty;
  1121. /*
  1122. * Paired with smp_mb() in __mark_inode_dirty(). This allows
  1123. * __mark_inode_dirty() to test i_state without grabbing i_lock -
  1124. * either they see the I_DIRTY bits cleared or we see the dirtied
  1125. * inode.
  1126. *
  1127. * I_DIRTY_PAGES is always cleared together above even if @mapping
  1128. * still has dirty pages. The flag is reinstated after smp_mb() if
  1129. * necessary. This guarantees that either __mark_inode_dirty()
  1130. * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
  1131. */
  1132. smp_mb();
  1133. if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
  1134. inode->i_state |= I_DIRTY_PAGES;
  1135. spin_unlock(&inode->i_lock);
  1136. if (dirty & I_DIRTY_TIME)
  1137. mark_inode_dirty_sync(inode);
  1138. /* Don't write the inode if only I_DIRTY_PAGES was set */
  1139. if (dirty & ~I_DIRTY_PAGES) {
  1140. int err = write_inode(inode, wbc);
  1141. if (ret == 0)
  1142. ret = err;
  1143. }
  1144. trace_writeback_single_inode(inode, wbc, nr_to_write);
  1145. return ret;
  1146. }
  1147. /*
  1148. * Write out an inode's dirty pages. Either the caller has an active reference
  1149. * on the inode or the inode has I_WILL_FREE set.
  1150. *
  1151. * This function is designed to be called for writing back one inode which
  1152. * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
  1153. * and does more profound writeback list handling in writeback_sb_inodes().
  1154. */
  1155. static int
  1156. writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
  1157. struct writeback_control *wbc)
  1158. {
  1159. int ret = 0;
  1160. spin_lock(&inode->i_lock);
  1161. if (!atomic_read(&inode->i_count))
  1162. WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
  1163. else
  1164. WARN_ON(inode->i_state & I_WILL_FREE);
  1165. if (inode->i_state & I_SYNC) {
  1166. if (wbc->sync_mode != WB_SYNC_ALL)
  1167. goto out;
  1168. /*
  1169. * It's a data-integrity sync. We must wait. Since callers hold
  1170. * inode reference or inode has I_WILL_FREE set, it cannot go
  1171. * away under us.
  1172. */
  1173. __inode_wait_for_writeback(inode);
  1174. }
  1175. WARN_ON(inode->i_state & I_SYNC);
  1176. /*
  1177. * Skip inode if it is clean and we have no outstanding writeback in
  1178. * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
  1179. * function since flusher thread may be doing for example sync in
  1180. * parallel and if we move the inode, it could get skipped. So here we
  1181. * make sure inode is on some writeback list and leave it there unless
  1182. * we have completely cleaned the inode.
  1183. */
  1184. if (!(inode->i_state & I_DIRTY_ALL) &&
  1185. (wbc->sync_mode != WB_SYNC_ALL ||
  1186. !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
  1187. goto out;
  1188. inode->i_state |= I_SYNC;
  1189. wbc_attach_and_unlock_inode(wbc, inode);
  1190. ret = __writeback_single_inode(inode, wbc);
  1191. wbc_detach_inode(wbc);
  1192. spin_lock(&wb->list_lock);
  1193. spin_lock(&inode->i_lock);
  1194. /*
  1195. * If inode is clean, remove it from writeback lists. Otherwise don't
  1196. * touch it. See comment above for explanation.
  1197. */
  1198. if (!(inode->i_state & I_DIRTY_ALL))
  1199. inode_io_list_del_locked(inode, wb);
  1200. spin_unlock(&wb->list_lock);
  1201. inode_sync_complete(inode);
  1202. out:
  1203. spin_unlock(&inode->i_lock);
  1204. return ret;
  1205. }
static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
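/*
 * Worked example (illustrative only, not part of the original source): in
 * the WB_SYNC_NONE case the chunk is roughly half a second's worth of the
 * device's measured write bandwidth, clamped by the global dirty limit and
 * the remaining work, then rounded to a multiple of MIN_WRITEBACK_PAGES
 * (commonly 1024 with 4KiB pages). For a device averaging about 200MB/s,
 * i.e. ~51200 pages/s, and assuming the dirty-limit term is larger:
 *
 *	pages = min(51200 / 2, dirty_limit / DIRTY_SCOPE);	25600
 *	pages = min(pages, work->nr_pages);			say, still 25600
 *	pages = round_down(25600 + 1024, 1024);			26624 pages
 *
 * The concrete numbers depend on the runtime bandwidth estimate and the
 * dirty limits; this sketch only shows the shape of the computation.
 */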
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 *
 * NOTE! This is called with wb->list_lock held, and will
 * unlock and relock that for each inode it ends up doing
 * IO for.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		wbc_attach_and_unlock_inode(&wbc, inode);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		wbc_detach_inode(&wbc);
		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		if (need_resched()) {
			/*
			 * We're trying to balance between building up a nice
			 * long list of IOs to improve our merge rate, and
			 * getting those IOs out quickly for anyone throttling
			 * in balance_dirty_pages(). cond_resched() doesn't
			 * unplug, so get our IOs out the door before we
			 * give up the CPU.
			 */
			blk_flush_plug(current);
			cond_resched();
		}

		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
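/*
 * Accounting note (illustrative, not from the original source): "wrote"
 * deliberately mixes two units. If writeback_chunk_size() handed out a
 * chunk of, say, 1024 pages and __writeback_single_inode() left
 * wbc.nr_to_write at 200, then 824 pages were issued and wrote += 824;
 * if the inode also ended up completely clean, wrote is bumped by one more
 * for the inode itself. wb_writeback() only cares whether the total is
 * non-zero (progress was made) and how work->nr_pages is draining.
 */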
static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}
static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work.nr_pages;
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;
	struct blk_plug plug;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !wb_over_bg_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work->nr_pages;
}
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}
static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (wb_over_bg_thresh(wb)) {
		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
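/*
 * Note on the "* 10" conversions above and in wb_writeback() (explanatory
 * sketch, not from the original source): dirty_writeback_interval and
 * dirty_expire_interval are expressed in centiseconds (they back the
 * vm.dirty_writeback_centisecs and vm.dirty_expire_centisecs sysctls), so
 * multiplying by 10 yields milliseconds for msecs_to_jiffies(). With the
 * usual defaults, assuming nothing has tuned them:
 *
 *	500 * 10  = 5000 ms	periodic flush roughly every 5 seconds
 *	3000 * 10 = 30000 ms	an inode must be ~30s old for kupdate writeout
 */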
/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(wb)) != NULL) {
		struct wb_completion *done = work->done;

		trace_writeback_exec(wb, work);
		wrote += wb_writeback(wb, work);

		if (work->auto_free)
			kfree(work);
		if (done && atomic_dec_and_test(&done->cnt))
			wake_up_all(&wb->bdi->wb_waitq);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void wb_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	long pages_written;

	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(WB_registered, &wb->state))) {
		/*
		 * The normal path. Keep writing back @wb until its
		 * work_list is empty. Note that this path is also taken
		 * if @wb is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&wb->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker. Don't hog it. Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&wb->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		wb_wakeup_delayed(wb);

	current->flags &= ~PF_SWAPWRITE;
}
/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		struct bdi_writeback *wb;

		if (!bdi_has_dirty_io(bdi))
			continue;

		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
			wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
					   false, reason);
	}
	rcu_read_unlock();
}
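/*
 * Usage sketch (illustrative only, not a claim about any particular
 * in-tree caller): a memory-pressure path might nudge every flusher with a
 * bounded amount of work, or pass 0 to request "everything dirty". The
 * helper name and the reason code below are examples.
 *
 *	static void example_kick_flushers(void)
 *	{
 *		wakeup_flusher_threads(1024, WB_REASON_TRY_TO_FREE_PAGES);
 *	}
 */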
/*
 * Wake up bdi's periodically to make sure dirtytime inodes get
 * written back periodically. We deliberately do *not* check the
 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
 * kernel to be constantly waking up once there are any dirtytime
 * inodes on the system. So instead we define a separate delayed work
 * function which gets called much more rarely. (By default, only
 * once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary. But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		struct bdi_writeback *wb;

		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
			if (!list_empty(&wb->b_dirty_time))
				wb_wakeup(wb);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}
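/*
 * For context (a sketch, not part of this file): the handler above is meant
 * to back a sysctl entry so that updating the interval immediately
 * reschedules dirtytime_work. A matching ctl_table entry would look roughly
 * like the following; the exact name, limits and table used by the kernel's
 * sysctl code live outside this file and are an assumption here.
 *
 *	{
 *		.procname	= "dirtytime_expire_seconds",
 *		.data		= &dirtytime_expire_interval,
 *		.maxlen		= sizeof(dirtytime_expire_interval),
 *		.mode		= 0644,
 *		.proc_handler	= dirtytime_interval_handler,
 *	},
 */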
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
	struct super_block *sb = inode->i_sb;
	int dirtytime;

	trace_writeback_mark_inode_dirty(inode, flags);

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}
	if (flags & I_DIRTY_INODE)
		flags &= ~I_DIRTY_TIME;
	dirtytime = flags & I_DIRTY_TIME;

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test. See there for details.
	 */
	smp_mb();

	if (((inode->i_state & flags) == flags) ||
	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
		goto out_unlock_inode;
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode_attach_wb(inode, NULL);

		if (flags & I_DIRTY_INODE)
			inode->i_state &= ~I_DIRTY_TIME;
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb;
			struct list_head *dirty_list;
			bool wakeup_bdi = false;

			wb = locked_inode_to_wb_and_lock_list(inode);

			WARN(bdi_cap_writeback_dirty(wb->bdi) &&
			     !test_bit(WB_registered, &wb->state),
			     "bdi-%s not registered\n", wb->bdi->name);

			inode->dirtied_when = jiffies;
			if (dirtytime)
				inode->dirtied_time_when = jiffies;

			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
				dirty_list = &wb->b_dirty;
			else
				dirty_list = &wb->b_dirty_time;

			wakeup_bdi = inode_io_list_move_locked(inode, wb,
							       dirty_list);

			spin_unlock(&wb->list_lock);
			trace_writeback_dirty_inode_enqueue(inode);

			/*
			 * If this is the first dirty inode for this bdi,
			 * we have to wake up the corresponding bdi thread
			 * to make sure background write-back happens
			 * later.
			 */
			if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
				wb_wakeup_delayed(wb);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
#undef I_DIRTY_INODE
}
EXPORT_SYMBOL(__mark_inode_dirty);
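/*
 * Usage sketch (illustrative only): most filesystem code reaches this
 * function through the wrappers in <linux/fs.h> rather than calling it
 * directly, roughly:
 *
 *	mark_inode_dirty(inode);	__mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	__mark_inode_dirty(inode, I_DIRTY_SYNC)
 *
 * while a lazytime-style timestamp update may pass I_DIRTY_TIME so the
 * inode lands on b_dirty_time instead of b_dirty. Treat the exact wrapper
 * definitions as assumptions about headers outside this file.
 */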
/*
 * The @s_sync_lock is used to serialise concurrent sync operations
 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
 * Concurrent callers will block on the s_sync_lock rather than doing contending
 * walks. The queueing maintains sync(2) required behaviour as all the IO that
 * has been issued up to the time this function is entered is guaranteed to be
 * completed by the time we have gained the lock and waited for all IO that is
 * in progress regardless of the order callers are granted the lock.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	mutex_lock(&sb->s_sync_lock);
	spin_lock(&sb->s_inode_list_lock);

	/*
	 * Data integrity sync. We must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync call for
	 * which writeout had already started. In that case the inode may not
	 * be on the dirty list, but we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		/*
		 * We keep the error status of individual mapping so that
		 * applications can catch the writeback error using fsync(2).
		 * See filemap_fdatawait_keep_errors() for details.
		 */
		filemap_fdatawait_keep_errors(mapping);

		cond_resched();

		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
	mutex_unlock(&sb->s_sync_lock);
}
static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
				     enum wb_reason reason, bool skip_if_busy)
{
	DEFINE_WB_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};
	struct backing_dev_info *bdi = sb->s_bdi;

	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
	wb_wait_for_completion(bdi, &done);
}
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	__writeback_inodes_sb_nr(sb, nr, reason, false);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
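/*
 * Usage sketch (illustrative only): a filesystem running low on
 * delayed-allocation space might push a bounded amount of its own dirty
 * data without waiting on it. The helper name and the choice of
 * WB_REASON_FS_FREE_SPACE below are examples, not statements about any
 * specific in-tree filesystem.
 *
 *	static void example_flush_some_dirty_data(struct super_block *sb,
 *						  unsigned long nr_pages)
 *	{
 *		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
 *	}
 *
 * Note that the caller is expected to hold sb->s_umount (see the WARN_ON in
 * __writeback_inodes_sb_nr() above); use the try_to_* variants further down
 * when that cannot be guaranteed.
 */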
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns true if writeback was started, false if not.
 */
bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
				   enum wb_reason reason)
{
	if (!down_read_trylock(&sb->s_umount))
		return false;

	__writeback_inodes_sb_nr(sb, nr, reason, true);
	up_read(&sb->s_umount);
	return true;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented via try_to_writeback_inodes_sb_nr().
 * Returns true if writeback was started, false if not.
 */
bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
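/*
 * Usage sketch (illustrative only): paths that cannot sleep on s_umount, or
 * that may race with unmount, use the try_to_* variants and simply carry on
 * when the trylock fails. The reason code below is only an example.
 *
 *	if (!try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE)) {
 *		nothing was started - another sync or umount holds s_umount
 *	}
 */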
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DEFINE_WB_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};
	struct backing_dev_info *bdi = sb->s_bdi;

	/*
	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
	 * inodes under writeback, and I_DIRTY_TIME inodes ignored by
	 * bdi_has_dirty() need to be written out too.
	 */
	if (bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_split_work_to_wbs(bdi, &work, false);
	wb_wait_for_completion(bdi, &done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
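/*
 * Usage sketch (illustrative only): a final-iput style path that has set
 * I_WILL_FREE, or an exporter such as knfsd holding a reference, can flush
 * the inode synchronously with:
 *
 *	err = write_inode_now(inode, 1);	sync == 1 means WB_SYNC_ALL
 *
 * Passing sync == 0 starts the same writeback without waiting for the I/O
 * to complete.
 */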
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
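/*
 * Usage sketch (illustrative only): a simple ->fsync implementation, after
 * flushing file data, pushes the inode itself with something like:
 *
 *	if (!(datasync && !(inode->i_state & I_DIRTY_DATASYNC)))
 *		err = sync_inode_metadata(inode, 1);
 *
 * which is roughly the shape of the generic library helper; treat the exact
 * condition above as an assumption rather than a quotation.
 */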