buffer.c

/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations. Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int rw, struct buffer_head *bh,
                         unsigned long bio_flags,
                         struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the page has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the PageDirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
                        bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;
        if (PageWriteback(page))
                *writeback = true;
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;
                if (buffer_dirty(bh))
                        *dirty = true;
                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
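
/*
 * Drop the page's buffer-list bookkeeping: clear PG_private, zero
 * page->private and release the page reference that was taken when
 * the buffers were attached.
 */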
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}
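
/*
 * Ratelimited report of an I/O error on @bh; buffers flagged BH_Quiet
 * are skipped.
 */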
static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler. Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers. To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high. This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;
        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                goto out;
        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
        /* we might be here because some of the buffers on this page are
         * not mapped. This is due to various races between
         * file io on the block device and getblk. It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device %pg blocksize: %d\n", bdev,
                        1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;
        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();
        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                           gfp_zone(GFP_NOFS), NULL,
                                           &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                          GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;
        BUG_ON(!buffer_async_read(bh));
        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
                SetPageError(page);
        }
        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;
still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        BUG_ON(!buffer_async_write(bh));
        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;
still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed. This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions. A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync(). For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed. But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers. Which is different from the address_space
 * against which the buffers are listed. So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list! In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want. The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
 * filesystems should do that. invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
 * take an address_space, not an inode. And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list. Because if the buffer is on a list,
 * it *must* already be on the right one. If not, the filesystem is being
 * silly. This will save a ton of locking. But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate). That requires careful auditing of all
 * filesystems (do it inside bforget()). It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}
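
/*
 * Does the inode have any buffers queued on its ->private_list?
 */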
int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io. It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion. Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;
        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}
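
/*
 * Emergency thaw helpers: forcibly unfreeze every frozen block device.
 * The work is queued so the thaw itself runs in process context.
 */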
static void do_thaw_one(struct super_block *sb, void *unused)
{
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->private_data;
        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;
        return fsync_buffers_list(&buffer_mapping->private_lock,
                                  &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer. This means that the block at
 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
 * dirty, schedule it for IO. So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;
        mark_buffer_dirty(bh);
        if (!mapping->private_data) {
                mapping->private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                               &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold the mem_cgroup_begin_page_stat() lock.
 */
static void __set_page_dirty(struct page *page, struct address_space *mapping,
                             struct mem_cgroup *memcg, int warn)
{
        unsigned long flags;
        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping, memcg);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking. It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers. If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied. There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness. That's fine. If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list. Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well. That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct mem_cgroup *memcg;
        struct address_space *mapping = page_mapping(page);
        if (unlikely(!mapping))
                return !TestSetPageDirty(page);
        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;
                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
         * per-memcg dirty page counters.
         */
        memcg = mem_cgroup_begin_page_stat(page);
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);
        if (newly_dirty)
                __set_page_dirty(page, mapping, memcg, 1);
        mem_cgroup_end_page_stat(memcg);
        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't. After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go. Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list. So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;
        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, WRITE_SYNC);
                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }
        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }
        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode. We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync(). Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
 * assumes that all the buffers are against the blockdev. Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;
                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list. This is called
 * when we're trying to free the inode itself. Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;
                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;
try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;
                bh->b_size = size;
                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }
        /*
         * Return failure for non-async IO requests. Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available. But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;
        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO. Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;
        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}
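
/*
 * Return the number of the first block that lies beyond the end of the
 * device, in units of @size bytes, or ~0 if the device size is not yet
 * known.
 */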
static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);
        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size, int sizebits, gfp_t gfp)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
        gfp_t gfp_mask;
        gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
        /*
         * XXX: __getblk_slow() can not really deal with failure and
         * will endlessly loop on improvised global reclaim. Prefer
         * looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp_mask |= __GFP_NOFAIL;
        page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;
        BUG_ON(!PageLocked(page));
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
                                                (sector_t)index << sizebits,
                                                size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }
        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;
        /*
         * Link the page to the buffers and initialise them. Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
                        size);
        spin_unlock(&inode->i_mapping->private_lock);
done:
        ret = (block < end_block) ? 1 : -ENXIO;
failed:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

/*
 * Create buffers for the specified block device block's page. If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
        pgoff_t index;
        int sizebits;
        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);
        index = block >> sizebits;
        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index. (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %pg\n",
                        __func__, (unsigned long long)block,
                        bdev);
                return -EIO;
        }
        /* Create a page with the proper size buffers.. */
        return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                        size);
                printk(KERN_ERR "logical block size: %d\n",
                        bdev_logical_block_size(bdev));
                dump_stack();
                return NULL;
        }
        for (;;) {
                struct buffer_head *bh;
                int ret;
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
                ret = grow_buffers(bdev, block, size, gfp);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}
EXPORT_SYMBOL(__getblk_slow);

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page. If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also. When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate. But their backing page remains not
 * uptodate - even if all of its buffers are uptodate. A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));
        trace_block_dirty_buffer(bh);
        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }
        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                struct address_space *mapping = NULL;
                struct mem_cgroup *memcg;
                memcg = mem_cgroup_begin_page_stat(page);
                if (!TestSetPageDirty(page)) {
                        mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, memcg, 0);
                }
                mem_cgroup_end_page_stat(memcg);
                if (mapping)
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);

/*
 * Decrement a buffer_head's reference count. If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);
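
/*
 * Read @bh synchronously if it is not already uptodate. Returns the buffer
 * on success; on I/O error the reference is dropped and NULL is returned.
 */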
  1098. static struct buffer_head *__bread_slow(struct buffer_head *bh)
  1099. {
  1100. lock_buffer(bh);
  1101. if (buffer_uptodate(bh)) {
  1102. unlock_buffer(bh);
  1103. return bh;
  1104. } else {
  1105. get_bh(bh);
  1106. bh->b_end_io = end_buffer_read_sync;
  1107. submit_bh(READ, bh);
  1108. wait_on_buffer(bh);
  1109. if (buffer_uptodate(bh))
  1110. return bh;
  1111. }
  1112. brelse(bh);
  1113. return NULL;
  1114. }
  1115. /*
  1116. * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
  1117. * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
  1118. * refcount elevated by one when they're in an LRU. A buffer can only appear
  1119. * once in a particular CPU's LRU. A single buffer can be present in multiple
  1120. * CPU's LRUs at the same time.
  1121. *
  1122. * This is a transparent caching front-end to sb_bread(), sb_getblk() and
  1123. * sb_find_get_block().
  1124. *
  1125. * The LRUs themselves only need locking against invalidate_bh_lrus. We use
  1126. * a local interrupt disable for that.
  1127. */
  1128. #define BH_LRU_SIZE 16
  1129. struct bh_lru {
  1130. struct buffer_head *bhs[BH_LRU_SIZE];
  1131. };
  1132. static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
  1133. #ifdef CONFIG_SMP
  1134. #define bh_lru_lock() local_irq_disable()
  1135. #define bh_lru_unlock() local_irq_enable()
  1136. #else
  1137. #define bh_lru_lock() preempt_disable()
  1138. #define bh_lru_unlock() preempt_enable()
  1139. #endif
  1140. static inline void check_irqs_on(void)
  1141. {
  1142. #ifdef irqs_disabled
  1143. BUG_ON(irqs_disabled());
  1144. #endif
  1145. }
  1146. /*
  1147. * The LRU management algorithm is dopey-but-simple. Sorry.
  1148. */
  1149. static void bh_lru_install(struct buffer_head *bh)
  1150. {
  1151. struct buffer_head *evictee = NULL;
  1152. check_irqs_on();
  1153. bh_lru_lock();
  1154. if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
  1155. struct buffer_head *bhs[BH_LRU_SIZE];
  1156. int in;
  1157. int out = 0;
  1158. get_bh(bh);
  1159. bhs[out++] = bh;
  1160. for (in = 0; in < BH_LRU_SIZE; in++) {
  1161. struct buffer_head *bh2 =
  1162. __this_cpu_read(bh_lrus.bhs[in]);
  1163. if (bh2 == bh) {
  1164. __brelse(bh2);
  1165. } else {
  1166. if (out >= BH_LRU_SIZE) {
  1167. BUG_ON(evictee != NULL);
  1168. evictee = bh2;
  1169. } else {
  1170. bhs[out++] = bh2;
  1171. }
  1172. }
  1173. }
  1174. while (out < BH_LRU_SIZE)
  1175. bhs[out++] = NULL;
  1176. memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
  1177. }
  1178. bh_lru_unlock();
  1179. if (evictee)
  1180. __brelse(evictee);
  1181. }
  1182. /*
  1183. * Look up the bh in this cpu's LRU. If it's there, move it to the head.
  1184. */
  1185. static struct buffer_head *
  1186. lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
  1187. {
  1188. struct buffer_head *ret = NULL;
  1189. unsigned int i;
  1190. check_irqs_on();
  1191. bh_lru_lock();
  1192. for (i = 0; i < BH_LRU_SIZE; i++) {
  1193. struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
  1194. if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
  1195. bh->b_size == size) {
  1196. if (i) {
  1197. while (i) {
  1198. __this_cpu_write(bh_lrus.bhs[i],
  1199. __this_cpu_read(bh_lrus.bhs[i - 1]));
  1200. i--;
  1201. }
  1202. __this_cpu_write(bh_lrus.bhs[0], bh);
  1203. }
  1204. get_bh(bh);
  1205. ret = bh;
  1206. break;
  1207. }
  1208. }
  1209. bh_lru_unlock();
  1210. return ret;
  1211. }
  1212. /*
  1213. * Perform a pagecache lookup for the matching buffer. If it's there, refresh
  1214. * it in the LRU and mark it as accessed. If it is not present then return
  1215. * NULL
  1216. */
  1217. struct buffer_head *
  1218. __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
  1219. {
  1220. struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
  1221. if (bh == NULL) {
  1222. /* __find_get_block_slow will mark the page accessed */
  1223. bh = __find_get_block_slow(bdev, block);
  1224. if (bh)
  1225. bh_lru_install(bh);
  1226. } else
  1227. touch_buffer(bh);
  1228. return bh;
  1229. }
  1230. EXPORT_SYMBOL(__find_get_block);
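/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * lookup-only "peek" at the buffer cache through the per-cpu LRU
 * front-end above.  examplefs_block_is_cached() and its super_block
 * argument are hypothetical; sb_find_get_block() is the inline wrapper
 * from <linux/buffer_head.h> that supplies sb->s_bdev and sb->s_blocksize.
 */
#if 0
static int examplefs_block_is_cached(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;

	bh = sb_find_get_block(sb, block);	/* never reads from disk */
	if (!bh)
		return 0;
	/* bh came back with an elevated refcount; drop it when done */
	brelse(bh);
	return 1;
}
#endif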
  1231. /*
  1232. * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
  1233. * which corresponds to the passed block_device, block and size. The
  1234. * returned buffer has its reference count incremented.
  1235. *
  1236. * __getblk_gfp() will lock up the machine if grow_dev_page's
  1237. * try_to_free_buffers() attempt is failing. FIXME, perhaps?
  1238. */
  1239. struct buffer_head *
  1240. __getblk_gfp(struct block_device *bdev, sector_t block,
  1241. unsigned size, gfp_t gfp)
  1242. {
  1243. struct buffer_head *bh = __find_get_block(bdev, block, size);
  1244. might_sleep();
  1245. if (bh == NULL)
  1246. bh = __getblk_slow(bdev, block, size, gfp);
  1247. return bh;
  1248. }
  1249. EXPORT_SYMBOL(__getblk_gfp);
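/*
 * Editor's illustrative sketch (not part of the original buffer.c): the
 * usual pattern for a block that is about to be overwritten in full, so
 * no read is needed.  examplefs_zero_block() is hypothetical; sb_getblk()
 * is the <linux/buffer_head.h> wrapper around __getblk_gfp().
 */
#if 0
static int examplefs_zero_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* written back later by the VM */
	brelse(bh);
	return 0;
}
#endif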
  1250. /*
  1251. * Do async read-ahead on a buffer..
  1252. */
  1253. void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
  1254. {
  1255. struct buffer_head *bh = __getblk(bdev, block, size);
  1256. if (likely(bh)) {
  1257. ll_rw_block(READA, 1, &bh);
  1258. brelse(bh);
  1259. }
  1260. }
  1261. EXPORT_SYMBOL(__breadahead);
  1262. /**
  1263. * __bread_gfp() - reads a specified block and returns the bh
  1264. * @bdev: the block_device to read from
  1265. * @block: number of block
  1266. * @size: size (in bytes) to read
  1267. * @gfp: page allocation flag
  1268. *
  1269. * Reads a specified block, and returns buffer head that contains it.
1270. * If you set @gfp to zero, the page cache is allocated from a
1271. * non-movable area, so that it does not get in the way of page migration.
  1272. * It returns NULL if the block was unreadable.
  1273. */
  1274. struct buffer_head *
  1275. __bread_gfp(struct block_device *bdev, sector_t block,
  1276. unsigned size, gfp_t gfp)
  1277. {
  1278. struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
  1279. if (likely(bh) && !buffer_uptodate(bh))
  1280. bh = __bread_slow(bh);
  1281. return bh;
  1282. }
  1283. EXPORT_SYMBOL(__bread_gfp);
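/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * plain synchronous read through the __bread_gfp() path.  The helper
 * name and the magic-number check are hypothetical; sb_bread() is the
 * <linux/buffer_head.h> wrapper that supplies sb->s_bdev, sb->s_blocksize
 * and __GFP_MOVABLE.
 */
#if 0
#define EXAMPLEFS_MAGIC	0x4d455845	/* hypothetical on-disk magic */

static int examplefs_check_magic(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);
	__le32 magic;

	if (!bh)
		return -EIO;		/* block was unreadable */
	magic = *(__le32 *)bh->b_data;	/* b_size bytes are now uptodate */
	brelse(bh);
	return le32_to_cpu(magic) == EXAMPLEFS_MAGIC ? 0 : -EINVAL;
}
#endif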
  1284. /*
  1285. * invalidate_bh_lrus() is called rarely - but not only at unmount.
  1286. * This doesn't race because it runs in each cpu either in irq
  1287. * or with preempt disabled.
  1288. */
  1289. static void invalidate_bh_lru(void *arg)
  1290. {
  1291. struct bh_lru *b = &get_cpu_var(bh_lrus);
  1292. int i;
  1293. for (i = 0; i < BH_LRU_SIZE; i++) {
  1294. brelse(b->bhs[i]);
  1295. b->bhs[i] = NULL;
  1296. }
  1297. put_cpu_var(bh_lrus);
  1298. }
  1299. static bool has_bh_in_lru(int cpu, void *dummy)
  1300. {
  1301. struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
  1302. int i;
  1303. for (i = 0; i < BH_LRU_SIZE; i++) {
  1304. if (b->bhs[i])
  1305. return 1;
  1306. }
  1307. return 0;
  1308. }
  1309. void invalidate_bh_lrus(void)
  1310. {
  1311. on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
  1312. }
  1313. EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
  1314. void set_bh_page(struct buffer_head *bh,
  1315. struct page *page, unsigned long offset)
  1316. {
  1317. bh->b_page = page;
  1318. BUG_ON(offset >= PAGE_SIZE);
  1319. if (PageHighMem(page))
  1320. /*
  1321. * This catches illegal uses and preserves the offset:
  1322. */
  1323. bh->b_data = (char *)(0 + offset);
  1324. else
  1325. bh->b_data = page_address(page) + offset;
  1326. }
  1327. EXPORT_SYMBOL(set_bh_page);
  1328. /*
  1329. * Called when truncating a buffer on a page completely.
  1330. */
  1331. /* Bits that are cleared during an invalidate */
  1332. #define BUFFER_FLAGS_DISCARD \
  1333. (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
  1334. 1 << BH_Delay | 1 << BH_Unwritten)
  1335. static void discard_buffer(struct buffer_head * bh)
  1336. {
  1337. unsigned long b_state, b_state_old;
  1338. lock_buffer(bh);
  1339. clear_buffer_dirty(bh);
  1340. bh->b_bdev = NULL;
  1341. b_state = bh->b_state;
  1342. for (;;) {
  1343. b_state_old = cmpxchg(&bh->b_state, b_state,
  1344. (b_state & ~BUFFER_FLAGS_DISCARD));
  1345. if (b_state_old == b_state)
  1346. break;
  1347. b_state = b_state_old;
  1348. }
  1349. unlock_buffer(bh);
  1350. }
  1351. /**
  1352. * block_invalidatepage - invalidate part or all of a buffer-backed page
  1353. *
  1354. * @page: the page which is affected
  1355. * @offset: start of the range to invalidate
  1356. * @length: length of the range to invalidate
  1357. *
  1358. * block_invalidatepage() is called when all or part of the page has become
  1359. * invalidated by a truncate operation.
  1360. *
  1361. * block_invalidatepage() does not have to release all buffers, but it must
  1362. * ensure that no dirty buffer is left outside @offset and that no I/O
  1363. * is underway against any of the blocks which are outside the truncation
1364. * point, because the caller is about to free (and possibly reuse) those
  1365. * blocks on-disk.
  1366. */
  1367. void block_invalidatepage(struct page *page, unsigned int offset,
  1368. unsigned int length)
  1369. {
  1370. struct buffer_head *head, *bh, *next;
  1371. unsigned int curr_off = 0;
  1372. unsigned int stop = length + offset;
  1373. BUG_ON(!PageLocked(page));
  1374. if (!page_has_buffers(page))
  1375. goto out;
  1376. /*
  1377. * Check for overflow
  1378. */
  1379. BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
  1380. head = page_buffers(page);
  1381. bh = head;
  1382. do {
  1383. unsigned int next_off = curr_off + bh->b_size;
  1384. next = bh->b_this_page;
  1385. /*
  1386. * Are we still fully in range ?
  1387. */
  1388. if (next_off > stop)
  1389. goto out;
  1390. /*
  1391. * is this block fully invalidated?
  1392. */
  1393. if (offset <= curr_off)
  1394. discard_buffer(bh);
  1395. curr_off = next_off;
  1396. bh = next;
  1397. } while (bh != head);
  1398. /*
  1399. * We release buffers only if the entire page is being invalidated.
  1400. * The get_block cached value has been unconditionally invalidated,
  1401. * so real IO is not possible anymore.
  1402. */
  1403. if (offset == 0)
  1404. try_to_release_page(page, 0);
  1405. out:
  1406. return;
  1407. }
  1408. EXPORT_SYMBOL(block_invalidatepage);
  1409. /*
  1410. * We attach and possibly dirty the buffers atomically wrt
  1411. * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
  1412. * is already excluded via the page lock.
  1413. */
  1414. void create_empty_buffers(struct page *page,
  1415. unsigned long blocksize, unsigned long b_state)
  1416. {
  1417. struct buffer_head *bh, *head, *tail;
  1418. head = alloc_page_buffers(page, blocksize, 1);
  1419. bh = head;
  1420. do {
  1421. bh->b_state |= b_state;
  1422. tail = bh;
  1423. bh = bh->b_this_page;
  1424. } while (bh);
  1425. tail->b_this_page = head;
  1426. spin_lock(&page->mapping->private_lock);
  1427. if (PageUptodate(page) || PageDirty(page)) {
  1428. bh = head;
  1429. do {
  1430. if (PageDirty(page))
  1431. set_buffer_dirty(bh);
  1432. if (PageUptodate(page))
  1433. set_buffer_uptodate(bh);
  1434. bh = bh->b_this_page;
  1435. } while (bh != head);
  1436. }
  1437. attach_page_buffers(page, head);
  1438. spin_unlock(&page->mapping->private_lock);
  1439. }
  1440. EXPORT_SYMBOL(create_empty_buffers);
  1441. /*
1442. * We are taking a block for data and we don't want any output from any
1443. * buffer-cache aliases from the moment this function returns until the
1444. * moment when something explicitly marks the buffer dirty (hopefully
1445. * that will not happen until we free that block ;-)
1446. * We don't even need to mark it not-uptodate - nobody can expect
1447. * anything from a newly allocated buffer anyway. We used to use
1448. * unmap_buffer() for such invalidation, but that was wrong. We definitely
1449. * don't want to mark the alias unmapped, for example - it would confuse
1450. * anyone who might pick it up with bread() afterwards...
  1451. *
  1452. * Also.. Note that bforget() doesn't lock the buffer. So there can
  1453. * be writeout I/O going on against recently-freed buffers. We don't
  1454. * wait on that I/O in bforget() - it's more efficient to wait on the I/O
  1455. * only if we really need to. That happens here.
  1456. */
  1457. void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
  1458. {
  1459. struct buffer_head *old_bh;
  1460. might_sleep();
  1461. old_bh = __find_get_block_slow(bdev, block);
  1462. if (old_bh) {
  1463. clear_buffer_dirty(old_bh);
  1464. wait_on_buffer(old_bh);
  1465. clear_buffer_req(old_bh);
  1466. __brelse(old_bh);
  1467. }
  1468. }
  1469. EXPORT_SYMBOL(unmap_underlying_metadata);
  1470. /*
  1471. * Size is a power-of-two in the range 512..PAGE_SIZE,
  1472. * and the case we care about most is PAGE_SIZE.
  1473. *
  1474. * So this *could* possibly be written with those
  1475. * constraints in mind (relevant mostly if some
  1476. * architecture has a slow bit-scan instruction)
  1477. */
  1478. static inline int block_size_bits(unsigned int blocksize)
  1479. {
  1480. return ilog2(blocksize);
  1481. }
  1482. static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
  1483. {
  1484. BUG_ON(!PageLocked(page));
  1485. if (!page_has_buffers(page))
  1486. create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
  1487. return page_buffers(page);
  1488. }
  1489. /*
  1490. * NOTE! All mapped/uptodate combinations are valid:
  1491. *
  1492. * Mapped Uptodate Meaning
  1493. *
  1494. * No No "unknown" - must do get_block()
  1495. * No Yes "hole" - zero-filled
  1496. * Yes No "allocated" - allocated on disk, not read in
  1497. * Yes Yes "valid" - allocated and up-to-date in memory.
  1498. *
  1499. * "Dirty" is valid only with the last case (mapped+uptodate).
  1500. */
  1501. /*
  1502. * While block_write_full_page is writing back the dirty buffers under
  1503. * the page lock, whoever dirtied the buffers may decide to clean them
  1504. * again at any time. We handle that by only looking at the buffer
  1505. * state inside lock_buffer().
  1506. *
  1507. * If block_write_full_page() is called for regular writeback
  1508. * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1509. * locked buffer. This can only happen if someone has written the buffer
  1510. * directly, with submit_bh(). At the address_space level PageWriteback
  1511. * prevents this contention from occurring.
  1512. *
  1513. * If block_write_full_page() is called with wbc->sync_mode ==
  1514. * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
  1515. * causes the writes to be flagged as synchronous writes.
  1516. */
  1517. static int __block_write_full_page(struct inode *inode, struct page *page,
  1518. get_block_t *get_block, struct writeback_control *wbc,
  1519. bh_end_io_t *handler)
  1520. {
  1521. int err;
  1522. sector_t block;
  1523. sector_t last_block;
  1524. struct buffer_head *bh, *head;
  1525. unsigned int blocksize, bbits;
  1526. int nr_underway = 0;
  1527. int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
  1528. head = create_page_buffers(page, inode,
  1529. (1 << BH_Dirty)|(1 << BH_Uptodate));
  1530. /*
  1531. * Be very careful. We have no exclusion from __set_page_dirty_buffers
  1532. * here, and the (potentially unmapped) buffers may become dirty at
  1533. * any time. If a buffer becomes dirty here after we've inspected it
  1534. * then we just miss that fact, and the page stays dirty.
  1535. *
  1536. * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
  1537. * handle that here by just cleaning them.
  1538. */
  1539. bh = head;
  1540. blocksize = bh->b_size;
  1541. bbits = block_size_bits(blocksize);
  1542. block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
  1543. last_block = (i_size_read(inode) - 1) >> bbits;
  1544. /*
  1545. * Get all the dirty buffers mapped to disk addresses and
  1546. * handle any aliases from the underlying blockdev's mapping.
  1547. */
  1548. do {
  1549. if (block > last_block) {
  1550. /*
  1551. * mapped buffers outside i_size will occur, because
  1552. * this page can be outside i_size when there is a
  1553. * truncate in progress.
  1554. */
  1555. /*
  1556. * The buffer was zeroed by block_write_full_page()
  1557. */
  1558. clear_buffer_dirty(bh);
  1559. set_buffer_uptodate(bh);
  1560. } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
  1561. buffer_dirty(bh)) {
  1562. WARN_ON(bh->b_size != blocksize);
  1563. err = get_block(inode, block, bh, 1);
  1564. if (err)
  1565. goto recover;
  1566. clear_buffer_delay(bh);
  1567. if (buffer_new(bh)) {
  1568. /* blockdev mappings never come here */
  1569. clear_buffer_new(bh);
  1570. unmap_underlying_metadata(bh->b_bdev,
  1571. bh->b_blocknr);
  1572. }
  1573. }
  1574. bh = bh->b_this_page;
  1575. block++;
  1576. } while (bh != head);
  1577. do {
  1578. if (!buffer_mapped(bh))
  1579. continue;
  1580. /*
  1581. * If it's a fully non-blocking write attempt and we cannot
  1582. * lock the buffer then redirty the page. Note that this can
  1583. * potentially cause a busy-wait loop from writeback threads
  1584. * and kswapd activity, but those code paths have their own
  1585. * higher-level throttling.
  1586. */
  1587. if (wbc->sync_mode != WB_SYNC_NONE) {
  1588. lock_buffer(bh);
  1589. } else if (!trylock_buffer(bh)) {
  1590. redirty_page_for_writepage(wbc, page);
  1591. continue;
  1592. }
  1593. if (test_clear_buffer_dirty(bh)) {
  1594. mark_buffer_async_write_endio(bh, handler);
  1595. } else {
  1596. unlock_buffer(bh);
  1597. }
  1598. } while ((bh = bh->b_this_page) != head);
  1599. /*
  1600. * The page and its buffers are protected by PageWriteback(), so we can
  1601. * drop the bh refcounts early.
  1602. */
  1603. BUG_ON(PageWriteback(page));
  1604. set_page_writeback(page);
  1605. do {
  1606. struct buffer_head *next = bh->b_this_page;
  1607. if (buffer_async_write(bh)) {
  1608. submit_bh_wbc(write_op, bh, 0, wbc);
  1609. nr_underway++;
  1610. }
  1611. bh = next;
  1612. } while (bh != head);
  1613. unlock_page(page);
  1614. err = 0;
  1615. done:
  1616. if (nr_underway == 0) {
  1617. /*
  1618. * The page was marked dirty, but the buffers were
  1619. * clean. Someone wrote them back by hand with
  1620. * ll_rw_block/submit_bh. A rare case.
  1621. */
  1622. end_page_writeback(page);
  1623. /*
  1624. * The page and buffer_heads can be released at any time from
  1625. * here on.
  1626. */
  1627. }
  1628. return err;
  1629. recover:
  1630. /*
  1631. * ENOSPC, or some other error. We may already have added some
  1632. * blocks to the file, so we need to write these out to avoid
  1633. * exposing stale data.
  1634. * The page is currently locked and not marked for writeback
  1635. */
  1636. bh = head;
  1637. /* Recovery: lock and submit the mapped buffers */
  1638. do {
  1639. if (buffer_mapped(bh) && buffer_dirty(bh) &&
  1640. !buffer_delay(bh)) {
  1641. lock_buffer(bh);
  1642. mark_buffer_async_write_endio(bh, handler);
  1643. } else {
  1644. /*
  1645. * The buffer may have been set dirty during
  1646. * attachment to a dirty page.
  1647. */
  1648. clear_buffer_dirty(bh);
  1649. }
  1650. } while ((bh = bh->b_this_page) != head);
  1651. SetPageError(page);
  1652. BUG_ON(PageWriteback(page));
  1653. mapping_set_error(page->mapping, err);
  1654. set_page_writeback(page);
  1655. do {
  1656. struct buffer_head *next = bh->b_this_page;
  1657. if (buffer_async_write(bh)) {
  1658. clear_buffer_dirty(bh);
  1659. submit_bh_wbc(write_op, bh, 0, wbc);
  1660. nr_underway++;
  1661. }
  1662. bh = next;
  1663. } while (bh != head);
  1664. unlock_page(page);
  1665. goto done;
  1666. }
  1667. /*
  1668. * If a page has any new buffers, zero them out here, and mark them uptodate
  1669. * and dirty so they'll be written out (in order to prevent uninitialised
  1670. * block data from leaking). And clear the new bit.
  1671. */
  1672. void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
  1673. {
  1674. unsigned int block_start, block_end;
  1675. struct buffer_head *head, *bh;
  1676. BUG_ON(!PageLocked(page));
  1677. if (!page_has_buffers(page))
  1678. return;
  1679. bh = head = page_buffers(page);
  1680. block_start = 0;
  1681. do {
  1682. block_end = block_start + bh->b_size;
  1683. if (buffer_new(bh)) {
  1684. if (block_end > from && block_start < to) {
  1685. if (!PageUptodate(page)) {
  1686. unsigned start, size;
  1687. start = max(from, block_start);
  1688. size = min(to, block_end) - start;
  1689. zero_user(page, start, size);
  1690. set_buffer_uptodate(bh);
  1691. }
  1692. clear_buffer_new(bh);
  1693. mark_buffer_dirty(bh);
  1694. }
  1695. }
  1696. block_start = block_end;
  1697. bh = bh->b_this_page;
  1698. } while (bh != head);
  1699. }
  1700. EXPORT_SYMBOL(page_zero_new_buffers);
  1701. int __block_write_begin(struct page *page, loff_t pos, unsigned len,
  1702. get_block_t *get_block)
  1703. {
  1704. unsigned from = pos & (PAGE_CACHE_SIZE - 1);
  1705. unsigned to = from + len;
  1706. struct inode *inode = page->mapping->host;
  1707. unsigned block_start, block_end;
  1708. sector_t block;
  1709. int err = 0;
  1710. unsigned blocksize, bbits;
  1711. struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
  1712. BUG_ON(!PageLocked(page));
  1713. BUG_ON(from > PAGE_CACHE_SIZE);
  1714. BUG_ON(to > PAGE_CACHE_SIZE);
  1715. BUG_ON(from > to);
  1716. head = create_page_buffers(page, inode, 0);
  1717. blocksize = head->b_size;
  1718. bbits = block_size_bits(blocksize);
  1719. block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
  1720. for(bh = head, block_start = 0; bh != head || !block_start;
  1721. block++, block_start=block_end, bh = bh->b_this_page) {
  1722. block_end = block_start + blocksize;
  1723. if (block_end <= from || block_start >= to) {
  1724. if (PageUptodate(page)) {
  1725. if (!buffer_uptodate(bh))
  1726. set_buffer_uptodate(bh);
  1727. }
  1728. continue;
  1729. }
  1730. if (buffer_new(bh))
  1731. clear_buffer_new(bh);
  1732. if (!buffer_mapped(bh)) {
  1733. WARN_ON(bh->b_size != blocksize);
  1734. err = get_block(inode, block, bh, 1);
  1735. if (err)
  1736. break;
  1737. if (buffer_new(bh)) {
  1738. unmap_underlying_metadata(bh->b_bdev,
  1739. bh->b_blocknr);
  1740. if (PageUptodate(page)) {
  1741. clear_buffer_new(bh);
  1742. set_buffer_uptodate(bh);
  1743. mark_buffer_dirty(bh);
  1744. continue;
  1745. }
  1746. if (block_end > to || block_start < from)
  1747. zero_user_segments(page,
  1748. to, block_end,
  1749. block_start, from);
  1750. continue;
  1751. }
  1752. }
  1753. if (PageUptodate(page)) {
  1754. if (!buffer_uptodate(bh))
  1755. set_buffer_uptodate(bh);
  1756. continue;
  1757. }
  1758. if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
  1759. !buffer_unwritten(bh) &&
  1760. (block_start < from || block_end > to)) {
  1761. ll_rw_block(READ, 1, &bh);
  1762. *wait_bh++=bh;
  1763. }
  1764. }
  1765. /*
  1766. * If we issued read requests - let them complete.
  1767. */
  1768. while(wait_bh > wait) {
  1769. wait_on_buffer(*--wait_bh);
  1770. if (!buffer_uptodate(*wait_bh))
  1771. err = -EIO;
  1772. }
  1773. if (unlikely(err))
  1774. page_zero_new_buffers(page, from, to);
  1775. return err;
  1776. }
  1777. EXPORT_SYMBOL(__block_write_begin);
  1778. static int __block_commit_write(struct inode *inode, struct page *page,
  1779. unsigned from, unsigned to)
  1780. {
  1781. unsigned block_start, block_end;
  1782. int partial = 0;
  1783. unsigned blocksize;
  1784. struct buffer_head *bh, *head;
  1785. bh = head = page_buffers(page);
  1786. blocksize = bh->b_size;
  1787. block_start = 0;
  1788. do {
  1789. block_end = block_start + blocksize;
  1790. if (block_end <= from || block_start >= to) {
  1791. if (!buffer_uptodate(bh))
  1792. partial = 1;
  1793. } else {
  1794. set_buffer_uptodate(bh);
  1795. mark_buffer_dirty(bh);
  1796. }
  1797. clear_buffer_new(bh);
  1798. block_start = block_end;
  1799. bh = bh->b_this_page;
  1800. } while (bh != head);
  1801. /*
  1802. * If this is a partial write which happened to make all buffers
  1803. * uptodate then we can optimize away a bogus readpage() for
  1804. * the next read(). Here we 'discover' whether the page went
  1805. * uptodate as a result of this (potentially partial) write.
  1806. */
  1807. if (!partial)
  1808. SetPageUptodate(page);
  1809. return 0;
  1810. }
  1811. /*
  1812. * block_write_begin takes care of the basic task of block allocation and
  1813. * bringing partial write blocks uptodate first.
  1814. *
  1815. * The filesystem needs to handle block truncation upon failure.
  1816. */
  1817. int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
  1818. unsigned flags, struct page **pagep, get_block_t *get_block)
  1819. {
  1820. pgoff_t index = pos >> PAGE_CACHE_SHIFT;
  1821. struct page *page;
  1822. int status;
  1823. page = grab_cache_page_write_begin(mapping, index, flags);
  1824. if (!page)
  1825. return -ENOMEM;
  1826. status = __block_write_begin(page, pos, len, get_block);
  1827. if (unlikely(status)) {
  1828. unlock_page(page);
  1829. page_cache_release(page);
  1830. page = NULL;
  1831. }
  1832. *pagep = page;
  1833. return status;
  1834. }
  1835. EXPORT_SYMBOL(block_write_begin);
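/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * minimal ->write_begin built on block_write_begin().  All examplefs_*
 * names are hypothetical; the failure path follows the rule stated above
 * that the filesystem must handle block truncation itself (compare
 * ext2's write-failed helper).
 */
#if 0
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create);

static int examplefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				examplefs_get_block);
	if (ret < 0 && pos + len > mapping->host->i_size) {
		/*
		 * Drop pagecache instantiated beyond i_size; any on-disk
		 * blocks would be trimmed by a filesystem-specific
		 * truncate here.
		 */
		truncate_pagecache(mapping->host, mapping->host->i_size);
	}
	return ret;
}
#endif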
  1836. int block_write_end(struct file *file, struct address_space *mapping,
  1837. loff_t pos, unsigned len, unsigned copied,
  1838. struct page *page, void *fsdata)
  1839. {
  1840. struct inode *inode = mapping->host;
  1841. unsigned start;
  1842. start = pos & (PAGE_CACHE_SIZE - 1);
  1843. if (unlikely(copied < len)) {
  1844. /*
  1845. * The buffers that were written will now be uptodate, so we
  1846. * don't have to worry about a readpage reading them and
  1847. * overwriting a partial write. However if we have encountered
  1848. * a short write and only partially written into a buffer, it
  1849. * will not be marked uptodate, so a readpage might come in and
  1850. * destroy our partial write.
  1851. *
  1852. * Do the simplest thing, and just treat any short write to a
  1853. * non uptodate page as a zero-length write, and force the
  1854. * caller to redo the whole thing.
  1855. */
  1856. if (!PageUptodate(page))
  1857. copied = 0;
  1858. page_zero_new_buffers(page, start+copied, start+len);
  1859. }
  1860. flush_dcache_page(page);
  1861. /* This could be a short (even 0-length) commit */
  1862. __block_commit_write(inode, page, start, start+copied);
  1863. return copied;
  1864. }
  1865. EXPORT_SYMBOL(block_write_end);
  1866. int generic_write_end(struct file *file, struct address_space *mapping,
  1867. loff_t pos, unsigned len, unsigned copied,
  1868. struct page *page, void *fsdata)
  1869. {
  1870. struct inode *inode = mapping->host;
  1871. loff_t old_size = inode->i_size;
  1872. int i_size_changed = 0;
  1873. copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
  1874. /*
  1875. * No need to use i_size_read() here, the i_size
  1876. * cannot change under us because we hold i_mutex.
  1877. *
  1878. * But it's important to update i_size while still holding page lock:
  1879. * page writeout could otherwise come in and zero beyond i_size.
  1880. */
  1881. if (pos+copied > inode->i_size) {
  1882. i_size_write(inode, pos+copied);
  1883. i_size_changed = 1;
  1884. }
  1885. unlock_page(page);
  1886. page_cache_release(page);
  1887. if (old_size < pos)
  1888. pagecache_isize_extended(inode, old_size, pos);
  1889. /*
  1890. * Don't mark the inode dirty under page lock. First, it unnecessarily
  1891. * makes the holding time of page lock longer. Second, it forces lock
  1892. * ordering of page lock and transaction start for journaling
  1893. * filesystems.
  1894. */
  1895. if (i_size_changed)
  1896. mark_inode_dirty(inode);
  1897. return copied;
  1898. }
  1899. EXPORT_SYMBOL(generic_write_end);
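/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * matching ->write_end.  Many filesystems point ->write_end straight at
 * generic_write_end(); the hypothetical wrapper below only adds clean-up
 * for a short copy that left blocks instantiated beyond i_size.
 */
#if 0
static int examplefs_write_end(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned copied, struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len && pos + len > mapping->host->i_size)
		truncate_pagecache(mapping->host, mapping->host->i_size);
	return ret;
}
#endif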
  1900. /*
  1901. * block_is_partially_uptodate checks whether buffers within a page are
  1902. * uptodate or not.
  1903. *
  1904. * Returns true if all buffers which correspond to a file portion
  1905. * we want to read are uptodate.
  1906. */
  1907. int block_is_partially_uptodate(struct page *page, unsigned long from,
  1908. unsigned long count)
  1909. {
  1910. unsigned block_start, block_end, blocksize;
  1911. unsigned to;
  1912. struct buffer_head *bh, *head;
  1913. int ret = 1;
  1914. if (!page_has_buffers(page))
  1915. return 0;
  1916. head = page_buffers(page);
  1917. blocksize = head->b_size;
  1918. to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
  1919. to = from + to;
  1920. if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
  1921. return 0;
  1922. bh = head;
  1923. block_start = 0;
  1924. do {
  1925. block_end = block_start + blocksize;
  1926. if (block_end > from && block_start < to) {
  1927. if (!buffer_uptodate(bh)) {
  1928. ret = 0;
  1929. break;
  1930. }
  1931. if (block_end >= to)
  1932. break;
  1933. }
  1934. block_start = block_end;
  1935. bh = bh->b_this_page;
  1936. } while (bh != head);
  1937. return ret;
  1938. }
  1939. EXPORT_SYMBOL(block_is_partially_uptodate);
  1940. /*
  1941. * Generic "read page" function for block devices that have the normal
1942. * get_block functionality. This covers most of the block device filesystems.
  1943. * Reads the page asynchronously --- the unlock_buffer() and
  1944. * set/clear_buffer_uptodate() functions propagate buffer state into the
  1945. * page struct once IO has completed.
  1946. */
  1947. int block_read_full_page(struct page *page, get_block_t *get_block)
  1948. {
  1949. struct inode *inode = page->mapping->host;
  1950. sector_t iblock, lblock;
  1951. struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
  1952. unsigned int blocksize, bbits;
  1953. int nr, i;
  1954. int fully_mapped = 1;
  1955. head = create_page_buffers(page, inode, 0);
  1956. blocksize = head->b_size;
  1957. bbits = block_size_bits(blocksize);
  1958. iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
  1959. lblock = (i_size_read(inode)+blocksize-1) >> bbits;
  1960. bh = head;
  1961. nr = 0;
  1962. i = 0;
  1963. do {
  1964. if (buffer_uptodate(bh))
  1965. continue;
  1966. if (!buffer_mapped(bh)) {
  1967. int err = 0;
  1968. fully_mapped = 0;
  1969. if (iblock < lblock) {
  1970. WARN_ON(bh->b_size != blocksize);
  1971. err = get_block(inode, iblock, bh, 0);
  1972. if (err)
  1973. SetPageError(page);
  1974. }
  1975. if (!buffer_mapped(bh)) {
  1976. zero_user(page, i * blocksize, blocksize);
  1977. if (!err)
  1978. set_buffer_uptodate(bh);
  1979. continue;
  1980. }
  1981. /*
  1982. * get_block() might have updated the buffer
  1983. * synchronously
  1984. */
  1985. if (buffer_uptodate(bh))
  1986. continue;
  1987. }
  1988. arr[nr++] = bh;
  1989. } while (i++, iblock++, (bh = bh->b_this_page) != head);
  1990. if (fully_mapped)
  1991. SetPageMappedToDisk(page);
  1992. if (!nr) {
  1993. /*
  1994. * All buffers are uptodate - we can set the page uptodate
  1995. * as well. But not if get_block() returned an error.
  1996. */
  1997. if (!PageError(page))
  1998. SetPageUptodate(page);
  1999. unlock_page(page);
  2000. return 0;
  2001. }
  2002. /* Stage two: lock the buffers */
  2003. for (i = 0; i < nr; i++) {
  2004. bh = arr[i];
  2005. lock_buffer(bh);
  2006. mark_buffer_async_read(bh);
  2007. }
  2008. /*
  2009. * Stage 3: start the IO. Check for uptodateness
  2010. * inside the buffer lock in case another process reading
  2011. * the underlying blockdev brought it uptodate (the sct fix).
  2012. */
  2013. for (i = 0; i < nr; i++) {
  2014. bh = arr[i];
  2015. if (buffer_uptodate(bh))
  2016. end_buffer_async_read(bh, 1);
  2017. else
  2018. submit_bh(READ, bh);
  2019. }
  2020. return 0;
  2021. }
  2022. EXPORT_SYMBOL(block_read_full_page);
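/*
 * Editor's illustrative sketch (not part of the original buffer.c): the
 * typical one-line ->readpage built on block_read_full_page().  The
 * examplefs_* names are hypothetical.
 */
#if 0
static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}
#endif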
  2023. /* utility function for filesystems that need to do work on expanding
  2024. * truncates. Uses filesystem pagecache writes to allow the filesystem to
  2025. * deal with the hole.
  2026. */
  2027. int generic_cont_expand_simple(struct inode *inode, loff_t size)
  2028. {
  2029. struct address_space *mapping = inode->i_mapping;
  2030. struct page *page;
  2031. void *fsdata;
  2032. int err;
  2033. err = inode_newsize_ok(inode, size);
  2034. if (err)
  2035. goto out;
  2036. err = pagecache_write_begin(NULL, mapping, size, 0,
  2037. AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
  2038. &page, &fsdata);
  2039. if (err)
  2040. goto out;
  2041. err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
  2042. BUG_ON(err > 0);
  2043. out:
  2044. return err;
  2045. }
  2046. EXPORT_SYMBOL(generic_cont_expand_simple);
  2047. static int cont_expand_zero(struct file *file, struct address_space *mapping,
  2048. loff_t pos, loff_t *bytes)
  2049. {
  2050. struct inode *inode = mapping->host;
  2051. unsigned blocksize = 1 << inode->i_blkbits;
  2052. struct page *page;
  2053. void *fsdata;
  2054. pgoff_t index, curidx;
  2055. loff_t curpos;
  2056. unsigned zerofrom, offset, len;
  2057. int err = 0;
  2058. index = pos >> PAGE_CACHE_SHIFT;
  2059. offset = pos & ~PAGE_CACHE_MASK;
  2060. while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
  2061. zerofrom = curpos & ~PAGE_CACHE_MASK;
  2062. if (zerofrom & (blocksize-1)) {
  2063. *bytes |= (blocksize-1);
  2064. (*bytes)++;
  2065. }
  2066. len = PAGE_CACHE_SIZE - zerofrom;
  2067. err = pagecache_write_begin(file, mapping, curpos, len,
  2068. AOP_FLAG_UNINTERRUPTIBLE,
  2069. &page, &fsdata);
  2070. if (err)
  2071. goto out;
  2072. zero_user(page, zerofrom, len);
  2073. err = pagecache_write_end(file, mapping, curpos, len, len,
  2074. page, fsdata);
  2075. if (err < 0)
  2076. goto out;
  2077. BUG_ON(err != len);
  2078. err = 0;
  2079. balance_dirty_pages_ratelimited(mapping);
  2080. if (unlikely(fatal_signal_pending(current))) {
  2081. err = -EINTR;
  2082. goto out;
  2083. }
  2084. }
  2085. /* page covers the boundary, find the boundary offset */
  2086. if (index == curidx) {
  2087. zerofrom = curpos & ~PAGE_CACHE_MASK;
2088. /* if we are going to expand the file, the last block will be filled */
  2089. if (offset <= zerofrom) {
  2090. goto out;
  2091. }
  2092. if (zerofrom & (blocksize-1)) {
  2093. *bytes |= (blocksize-1);
  2094. (*bytes)++;
  2095. }
  2096. len = offset - zerofrom;
  2097. err = pagecache_write_begin(file, mapping, curpos, len,
  2098. AOP_FLAG_UNINTERRUPTIBLE,
  2099. &page, &fsdata);
  2100. if (err)
  2101. goto out;
  2102. zero_user(page, zerofrom, len);
  2103. err = pagecache_write_end(file, mapping, curpos, len, len,
  2104. page, fsdata);
  2105. if (err < 0)
  2106. goto out;
  2107. BUG_ON(err != len);
  2108. err = 0;
  2109. }
  2110. out:
  2111. return err;
  2112. }
  2113. /*
2114. * For moronic filesystems that do not allow holes in a file:
2115. * we may have to extend the file.
  2116. */
  2117. int cont_write_begin(struct file *file, struct address_space *mapping,
  2118. loff_t pos, unsigned len, unsigned flags,
  2119. struct page **pagep, void **fsdata,
  2120. get_block_t *get_block, loff_t *bytes)
  2121. {
  2122. struct inode *inode = mapping->host;
  2123. unsigned blocksize = 1 << inode->i_blkbits;
  2124. unsigned zerofrom;
  2125. int err;
  2126. err = cont_expand_zero(file, mapping, pos, bytes);
  2127. if (err)
  2128. return err;
  2129. zerofrom = *bytes & ~PAGE_CACHE_MASK;
  2130. if (pos+len > *bytes && zerofrom & (blocksize-1)) {
  2131. *bytes |= (blocksize-1);
  2132. (*bytes)++;
  2133. }
  2134. return block_write_begin(mapping, pos, len, flags, pagep, get_block);
  2135. }
  2136. EXPORT_SYMBOL(cont_write_begin);
  2137. int block_commit_write(struct page *page, unsigned from, unsigned to)
  2138. {
  2139. struct inode *inode = page->mapping->host;
  2140. __block_commit_write(inode,page,from,to);
  2141. return 0;
  2142. }
  2143. EXPORT_SYMBOL(block_commit_write);
  2144. /*
  2145. * block_page_mkwrite() is not allowed to change the file size as it gets
  2146. * called from a page fault handler when a page is first dirtied. Hence we must
  2147. * be careful to check for EOF conditions here. We set the page up correctly
  2148. * for a written page which means we get ENOSPC checking when writing into
  2149. * holes and correct delalloc and unwritten extent mapping on filesystems that
  2150. * support these features.
  2151. *
  2152. * We are not allowed to take the i_mutex here so we have to play games to
  2153. * protect against truncate races as the page could now be beyond EOF. Because
  2154. * truncate writes the inode size before removing pages, once we have the
  2155. * page lock we can determine safely if the page is beyond EOF. If it is not
  2156. * beyond EOF, then the page is guaranteed safe against truncation until we
  2157. * unlock the page.
  2158. *
  2159. * Direct callers of this function should protect against filesystem freezing
  2160. * using sb_start_pagefault() - sb_end_pagefault() functions.
  2161. */
  2162. int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
  2163. get_block_t get_block)
  2164. {
  2165. struct page *page = vmf->page;
  2166. struct inode *inode = file_inode(vma->vm_file);
  2167. unsigned long end;
  2168. loff_t size;
  2169. int ret;
  2170. lock_page(page);
  2171. size = i_size_read(inode);
  2172. if ((page->mapping != inode->i_mapping) ||
  2173. (page_offset(page) > size)) {
  2174. /* We overload EFAULT to mean page got truncated */
  2175. ret = -EFAULT;
  2176. goto out_unlock;
  2177. }
  2178. /* page is wholly or partially inside EOF */
  2179. if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
  2180. end = size & ~PAGE_CACHE_MASK;
  2181. else
  2182. end = PAGE_CACHE_SIZE;
  2183. ret = __block_write_begin(page, 0, end, get_block);
  2184. if (!ret)
  2185. ret = block_commit_write(page, 0, end);
  2186. if (unlikely(ret < 0))
  2187. goto out_unlock;
  2188. set_page_dirty(page);
  2189. wait_for_stable_page(page);
  2190. return 0;
  2191. out_unlock:
  2192. unlock_page(page);
  2193. return ret;
  2194. }
  2195. EXPORT_SYMBOL(block_page_mkwrite);
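/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * direct caller of block_page_mkwrite() following the comment above -
 * it brackets the call with sb_start_pagefault()/sb_end_pagefault() and
 * converts the error with block_page_mkwrite_return() from
 * <linux/buffer_head.h>.  The examplefs_* names are hypothetical.
 */
#if 0
static int examplefs_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
	int ret;

	sb_start_pagefault(sb);
	ret = block_page_mkwrite(vma, vmf, examplefs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(ret);
}
#endif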
  2196. /*
  2197. * nobh_write_begin()'s prereads are special: the buffer_heads are freed
  2198. * immediately, while under the page lock. So it needs a special end_io
  2199. * handler which does not touch the bh after unlocking it.
  2200. */
  2201. static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
  2202. {
  2203. __end_buffer_read_notouch(bh, uptodate);
  2204. }
  2205. /*
  2206. * Attach the singly-linked list of buffers created by nobh_write_begin, to
  2207. * the page (converting it to circular linked list and taking care of page
  2208. * dirty races).
  2209. */
  2210. static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
  2211. {
  2212. struct buffer_head *bh;
  2213. BUG_ON(!PageLocked(page));
  2214. spin_lock(&page->mapping->private_lock);
  2215. bh = head;
  2216. do {
  2217. if (PageDirty(page))
  2218. set_buffer_dirty(bh);
  2219. if (!bh->b_this_page)
  2220. bh->b_this_page = head;
  2221. bh = bh->b_this_page;
  2222. } while (bh != head);
  2223. attach_page_buffers(page, head);
  2224. spin_unlock(&page->mapping->private_lock);
  2225. }
  2226. /*
2227. * On entry, the page is not uptodate at all.
2228. * On exit, the page is fully uptodate in the areas outside (from,to).
  2229. * The filesystem needs to handle block truncation upon failure.
  2230. */
  2231. int nobh_write_begin(struct address_space *mapping,
  2232. loff_t pos, unsigned len, unsigned flags,
  2233. struct page **pagep, void **fsdata,
  2234. get_block_t *get_block)
  2235. {
  2236. struct inode *inode = mapping->host;
  2237. const unsigned blkbits = inode->i_blkbits;
  2238. const unsigned blocksize = 1 << blkbits;
  2239. struct buffer_head *head, *bh;
  2240. struct page *page;
  2241. pgoff_t index;
  2242. unsigned from, to;
  2243. unsigned block_in_page;
  2244. unsigned block_start, block_end;
  2245. sector_t block_in_file;
  2246. int nr_reads = 0;
  2247. int ret = 0;
  2248. int is_mapped_to_disk = 1;
  2249. index = pos >> PAGE_CACHE_SHIFT;
  2250. from = pos & (PAGE_CACHE_SIZE - 1);
  2251. to = from + len;
  2252. page = grab_cache_page_write_begin(mapping, index, flags);
  2253. if (!page)
  2254. return -ENOMEM;
  2255. *pagep = page;
  2256. *fsdata = NULL;
  2257. if (page_has_buffers(page)) {
  2258. ret = __block_write_begin(page, pos, len, get_block);
  2259. if (unlikely(ret))
  2260. goto out_release;
  2261. return ret;
  2262. }
  2263. if (PageMappedToDisk(page))
  2264. return 0;
  2265. /*
  2266. * Allocate buffers so that we can keep track of state, and potentially
  2267. * attach them to the page if an error occurs. In the common case of
  2268. * no error, they will just be freed again without ever being attached
  2269. * to the page (which is all OK, because we're under the page lock).
  2270. *
  2271. * Be careful: the buffer linked list is a NULL terminated one, rather
  2272. * than the circular one we're used to.
  2273. */
  2274. head = alloc_page_buffers(page, blocksize, 0);
  2275. if (!head) {
  2276. ret = -ENOMEM;
  2277. goto out_release;
  2278. }
  2279. block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
  2280. /*
  2281. * We loop across all blocks in the page, whether or not they are
  2282. * part of the affected region. This is so we can discover if the
  2283. * page is fully mapped-to-disk.
  2284. */
  2285. for (block_start = 0, block_in_page = 0, bh = head;
  2286. block_start < PAGE_CACHE_SIZE;
  2287. block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
  2288. int create;
  2289. block_end = block_start + blocksize;
  2290. bh->b_state = 0;
  2291. create = 1;
  2292. if (block_start >= to)
  2293. create = 0;
  2294. ret = get_block(inode, block_in_file + block_in_page,
  2295. bh, create);
  2296. if (ret)
  2297. goto failed;
  2298. if (!buffer_mapped(bh))
  2299. is_mapped_to_disk = 0;
  2300. if (buffer_new(bh))
  2301. unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
  2302. if (PageUptodate(page)) {
  2303. set_buffer_uptodate(bh);
  2304. continue;
  2305. }
  2306. if (buffer_new(bh) || !buffer_mapped(bh)) {
  2307. zero_user_segments(page, block_start, from,
  2308. to, block_end);
  2309. continue;
  2310. }
  2311. if (buffer_uptodate(bh))
  2312. continue; /* reiserfs does this */
  2313. if (block_start < from || block_end > to) {
  2314. lock_buffer(bh);
  2315. bh->b_end_io = end_buffer_read_nobh;
  2316. submit_bh(READ, bh);
  2317. nr_reads++;
  2318. }
  2319. }
  2320. if (nr_reads) {
  2321. /*
  2322. * The page is locked, so these buffers are protected from
  2323. * any VM or truncate activity. Hence we don't need to care
  2324. * for the buffer_head refcounts.
  2325. */
  2326. for (bh = head; bh; bh = bh->b_this_page) {
  2327. wait_on_buffer(bh);
  2328. if (!buffer_uptodate(bh))
  2329. ret = -EIO;
  2330. }
  2331. if (ret)
  2332. goto failed;
  2333. }
  2334. if (is_mapped_to_disk)
  2335. SetPageMappedToDisk(page);
  2336. *fsdata = head; /* to be released by nobh_write_end */
  2337. return 0;
  2338. failed:
  2339. BUG_ON(!ret);
  2340. /*
  2341. * Error recovery is a bit difficult. We need to zero out blocks that
  2342. * were newly allocated, and dirty them to ensure they get written out.
  2343. * Buffers need to be attached to the page at this point, otherwise
  2344. * the handling of potential IO errors during writeout would be hard
  2345. * (could try doing synchronous writeout, but what if that fails too?)
  2346. */
  2347. attach_nobh_buffers(page, head);
  2348. page_zero_new_buffers(page, from, to);
  2349. out_release:
  2350. unlock_page(page);
  2351. page_cache_release(page);
  2352. *pagep = NULL;
  2353. return ret;
  2354. }
  2355. EXPORT_SYMBOL(nobh_write_begin);
  2356. int nobh_write_end(struct file *file, struct address_space *mapping,
  2357. loff_t pos, unsigned len, unsigned copied,
  2358. struct page *page, void *fsdata)
  2359. {
  2360. struct inode *inode = page->mapping->host;
  2361. struct buffer_head *head = fsdata;
  2362. struct buffer_head *bh;
  2363. BUG_ON(fsdata != NULL && page_has_buffers(page));
  2364. if (unlikely(copied < len) && head)
  2365. attach_nobh_buffers(page, head);
  2366. if (page_has_buffers(page))
  2367. return generic_write_end(file, mapping, pos, len,
  2368. copied, page, fsdata);
  2369. SetPageUptodate(page);
  2370. set_page_dirty(page);
  2371. if (pos+copied > inode->i_size) {
  2372. i_size_write(inode, pos+copied);
  2373. mark_inode_dirty(inode);
  2374. }
  2375. unlock_page(page);
  2376. page_cache_release(page);
  2377. while (head) {
  2378. bh = head;
  2379. head = head->b_this_page;
  2380. free_buffer_head(bh);
  2381. }
  2382. return copied;
  2383. }
  2384. EXPORT_SYMBOL(nobh_write_end);
  2385. /*
2386. * nobh_writepage() - based on block_write_full_page() except
  2387. * that it tries to operate without attaching bufferheads to
  2388. * the page.
  2389. */
  2390. int nobh_writepage(struct page *page, get_block_t *get_block,
  2391. struct writeback_control *wbc)
  2392. {
  2393. struct inode * const inode = page->mapping->host;
  2394. loff_t i_size = i_size_read(inode);
  2395. const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
  2396. unsigned offset;
  2397. int ret;
  2398. /* Is the page fully inside i_size? */
  2399. if (page->index < end_index)
  2400. goto out;
  2401. /* Is the page fully outside i_size? (truncate in progress) */
  2402. offset = i_size & (PAGE_CACHE_SIZE-1);
  2403. if (page->index >= end_index+1 || !offset) {
  2404. /*
  2405. * The page may have dirty, unmapped buffers. For example,
  2406. * they may have been added in ext3_writepage(). Make them
  2407. * freeable here, so the page does not leak.
  2408. */
  2409. #if 0
  2410. /* Not really sure about this - do we need this ? */
  2411. if (page->mapping->a_ops->invalidatepage)
  2412. page->mapping->a_ops->invalidatepage(page, offset);
  2413. #endif
  2414. unlock_page(page);
  2415. return 0; /* don't care */
  2416. }
  2417. /*
  2418. * The page straddles i_size. It must be zeroed out on each and every
  2419. * writepage invocation because it may be mmapped. "A file is mapped
  2420. * in multiples of the page size. For a file that is not a multiple of
  2421. * the page size, the remaining memory is zeroed when mapped, and
  2422. * writes to that region are not written out to the file."
  2423. */
  2424. zero_user_segment(page, offset, PAGE_CACHE_SIZE);
  2425. out:
  2426. ret = mpage_writepage(page, get_block, wbc);
  2427. if (ret == -EAGAIN)
  2428. ret = __block_write_full_page(inode, page, get_block, wbc,
  2429. end_buffer_async_write);
  2430. return ret;
  2431. }
  2432. EXPORT_SYMBOL(nobh_writepage);
  2433. int nobh_truncate_page(struct address_space *mapping,
  2434. loff_t from, get_block_t *get_block)
  2435. {
  2436. pgoff_t index = from >> PAGE_CACHE_SHIFT;
  2437. unsigned offset = from & (PAGE_CACHE_SIZE-1);
  2438. unsigned blocksize;
  2439. sector_t iblock;
  2440. unsigned length, pos;
  2441. struct inode *inode = mapping->host;
  2442. struct page *page;
  2443. struct buffer_head map_bh;
  2444. int err;
  2445. blocksize = 1 << inode->i_blkbits;
  2446. length = offset & (blocksize - 1);
  2447. /* Block boundary? Nothing to do */
  2448. if (!length)
  2449. return 0;
  2450. length = blocksize - length;
  2451. iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
  2452. page = grab_cache_page(mapping, index);
  2453. err = -ENOMEM;
  2454. if (!page)
  2455. goto out;
  2456. if (page_has_buffers(page)) {
  2457. has_buffers:
  2458. unlock_page(page);
  2459. page_cache_release(page);
  2460. return block_truncate_page(mapping, from, get_block);
  2461. }
  2462. /* Find the buffer that contains "offset" */
  2463. pos = blocksize;
  2464. while (offset >= pos) {
  2465. iblock++;
  2466. pos += blocksize;
  2467. }
  2468. map_bh.b_size = blocksize;
  2469. map_bh.b_state = 0;
  2470. err = get_block(inode, iblock, &map_bh, 0);
  2471. if (err)
  2472. goto unlock;
  2473. /* unmapped? It's a hole - nothing to do */
  2474. if (!buffer_mapped(&map_bh))
  2475. goto unlock;
  2476. /* Ok, it's mapped. Make sure it's up-to-date */
  2477. if (!PageUptodate(page)) {
  2478. err = mapping->a_ops->readpage(NULL, page);
  2479. if (err) {
  2480. page_cache_release(page);
  2481. goto out;
  2482. }
  2483. lock_page(page);
  2484. if (!PageUptodate(page)) {
  2485. err = -EIO;
  2486. goto unlock;
  2487. }
  2488. if (page_has_buffers(page))
  2489. goto has_buffers;
  2490. }
  2491. zero_user(page, offset, length);
  2492. set_page_dirty(page);
  2493. err = 0;
  2494. unlock:
  2495. unlock_page(page);
  2496. page_cache_release(page);
  2497. out:
  2498. return err;
  2499. }
  2500. EXPORT_SYMBOL(nobh_truncate_page);
  2501. int block_truncate_page(struct address_space *mapping,
  2502. loff_t from, get_block_t *get_block)
  2503. {
  2504. pgoff_t index = from >> PAGE_CACHE_SHIFT;
  2505. unsigned offset = from & (PAGE_CACHE_SIZE-1);
  2506. unsigned blocksize;
  2507. sector_t iblock;
  2508. unsigned length, pos;
  2509. struct inode *inode = mapping->host;
  2510. struct page *page;
  2511. struct buffer_head *bh;
  2512. int err;
  2513. blocksize = 1 << inode->i_blkbits;
  2514. length = offset & (blocksize - 1);
  2515. /* Block boundary? Nothing to do */
  2516. if (!length)
  2517. return 0;
  2518. length = blocksize - length;
  2519. iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
  2520. page = grab_cache_page(mapping, index);
  2521. err = -ENOMEM;
  2522. if (!page)
  2523. goto out;
  2524. if (!page_has_buffers(page))
  2525. create_empty_buffers(page, blocksize, 0);
  2526. /* Find the buffer that contains "offset" */
  2527. bh = page_buffers(page);
  2528. pos = blocksize;
  2529. while (offset >= pos) {
  2530. bh = bh->b_this_page;
  2531. iblock++;
  2532. pos += blocksize;
  2533. }
  2534. err = 0;
  2535. if (!buffer_mapped(bh)) {
  2536. WARN_ON(bh->b_size != blocksize);
  2537. err = get_block(inode, iblock, bh, 0);
  2538. if (err)
  2539. goto unlock;
  2540. /* unmapped? It's a hole - nothing to do */
  2541. if (!buffer_mapped(bh))
  2542. goto unlock;
  2543. }
  2544. /* Ok, it's mapped. Make sure it's up-to-date */
  2545. if (PageUptodate(page))
  2546. set_buffer_uptodate(bh);
  2547. if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
  2548. err = -EIO;
  2549. ll_rw_block(READ, 1, &bh);
  2550. wait_on_buffer(bh);
  2551. /* Uhhuh. Read error. Complain and punt. */
  2552. if (!buffer_uptodate(bh))
  2553. goto unlock;
  2554. }
  2555. zero_user(page, offset, length);
  2556. mark_buffer_dirty(bh);
  2557. err = 0;
  2558. unlock:
  2559. unlock_page(page);
  2560. page_cache_release(page);
  2561. out:
  2562. return err;
  2563. }
  2564. EXPORT_SYMBOL(block_truncate_page);
  2565. /*
  2566. * The generic ->writepage function for buffer-backed address_spaces
  2567. */
  2568. int block_write_full_page(struct page *page, get_block_t *get_block,
  2569. struct writeback_control *wbc)
  2570. {
  2571. struct inode * const inode = page->mapping->host;
  2572. loff_t i_size = i_size_read(inode);
  2573. const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
  2574. unsigned offset;
  2575. /* Is the page fully inside i_size? */
  2576. if (page->index < end_index)
  2577. return __block_write_full_page(inode, page, get_block, wbc,
  2578. end_buffer_async_write);
  2579. /* Is the page fully outside i_size? (truncate in progress) */
  2580. offset = i_size & (PAGE_CACHE_SIZE-1);
  2581. if (page->index >= end_index+1 || !offset) {
  2582. /*
  2583. * The page may have dirty, unmapped buffers. For example,
  2584. * they may have been added in ext3_writepage(). Make them
  2585. * freeable here, so the page does not leak.
  2586. */
  2587. do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
  2588. unlock_page(page);
  2589. return 0; /* don't care */
  2590. }
  2591. /*
  2592. * The page straddles i_size. It must be zeroed out on each and every
  2593. * writepage invocation because it may be mmapped. "A file is mapped
  2594. * in multiples of the page size. For a file that is not a multiple of
  2595. * the page size, the remaining memory is zeroed when mapped, and
  2596. * writes to that region are not written out to the file."
  2597. */
  2598. zero_user_segment(page, offset, PAGE_CACHE_SIZE);
  2599. return __block_write_full_page(inode, page, get_block, wbc,
  2600. end_buffer_async_write);
  2601. }
  2602. EXPORT_SYMBOL(block_write_full_page);
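/*
 * Editor's illustrative sketch (not part of the original buffer.c): the
 * usual one-line ->writepage built on block_write_full_page().  The
 * examplefs_* names are hypothetical.
 */
#if 0
static int examplefs_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}
#endif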
  2603. sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
  2604. get_block_t *get_block)
  2605. {
  2606. struct buffer_head tmp;
  2607. struct inode *inode = mapping->host;
  2608. tmp.b_state = 0;
  2609. tmp.b_blocknr = 0;
  2610. tmp.b_size = 1 << inode->i_blkbits;
  2611. get_block(inode, block, &tmp, 0);
  2612. return tmp.b_blocknr;
  2613. }
  2614. EXPORT_SYMBOL(generic_block_bmap);
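/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * ->bmap wrapper plus the address_space_operations table that ties the
 * preceding examplefs_* sketches together.  Every examplefs_* name is
 * hypothetical.
 */
#if 0
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,
	.writepage	= examplefs_writepage,
	.write_begin	= examplefs_write_begin,
	.write_end	= examplefs_write_end,
	.bmap		= examplefs_bmap,
};
#endif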
  2615. static void end_bio_bh_io_sync(struct bio *bio)
  2616. {
  2617. struct buffer_head *bh = bio->bi_private;
  2618. if (unlikely(bio_flagged(bio, BIO_QUIET)))
  2619. set_bit(BH_Quiet, &bh->b_state);
  2620. bh->b_end_io(bh, !bio->bi_error);
  2621. bio_put(bio);
  2622. }
  2623. /*
  2624. * This allows us to do IO even on the odd last sectors
  2625. * of a device, even if the block size is some multiple
  2626. * of the physical sector size.
  2627. *
  2628. * We'll just truncate the bio to the size of the device,
  2629. * and clear the end of the buffer head manually.
  2630. *
  2631. * Truly out-of-range accesses will turn into actual IO
  2632. * errors, this only handles the "we need to be able to
  2633. * do IO at the final sector" case.
  2634. */
  2635. void guard_bio_eod(int rw, struct bio *bio)
  2636. {
  2637. sector_t maxsector;
  2638. struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
  2639. unsigned truncated_bytes;
  2640. maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
  2641. if (!maxsector)
  2642. return;
  2643. /*
  2644. * If the *whole* IO is past the end of the device,
  2645. * let it through, and the IO layer will turn it into
  2646. * an EIO.
  2647. */
  2648. if (unlikely(bio->bi_iter.bi_sector >= maxsector))
  2649. return;
  2650. maxsector -= bio->bi_iter.bi_sector;
  2651. if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
  2652. return;
  2653. /* Uhhuh. We've got a bio that straddles the device size! */
  2654. truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
  2655. /* Truncate the bio.. */
  2656. bio->bi_iter.bi_size -= truncated_bytes;
  2657. bvec->bv_len -= truncated_bytes;
  2658. /* ..and clear the end of the buffer for reads */
  2659. if ((rw & RW_MASK) == READ) {
  2660. zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
  2661. truncated_bytes);
  2662. }
  2663. }
  2664. static int submit_bh_wbc(int rw, struct buffer_head *bh,
  2665. unsigned long bio_flags, struct writeback_control *wbc)
  2666. {
  2667. struct bio *bio;
  2668. BUG_ON(!buffer_locked(bh));
  2669. BUG_ON(!buffer_mapped(bh));
  2670. BUG_ON(!bh->b_end_io);
  2671. BUG_ON(buffer_delay(bh));
  2672. BUG_ON(buffer_unwritten(bh));
  2673. /*
  2674. * Only clear out a write error when rewriting
  2675. */
  2676. if (test_set_buffer_req(bh) && (rw & WRITE))
  2677. clear_buffer_write_io_error(bh);
  2678. /*
  2679. * from here on down, it's all bio -- do the initial mapping,
  2680. * submit_bio -> generic_make_request may further map this bio around
  2681. */
  2682. bio = bio_alloc(GFP_NOIO, 1);
  2683. if (wbc) {
  2684. wbc_init_bio(wbc, bio);
  2685. wbc_account_io(wbc, bh->b_page, bh->b_size);
  2686. }
  2687. bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
  2688. bio->bi_bdev = bh->b_bdev;
  2689. bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
  2690. BUG_ON(bio->bi_iter.bi_size != bh->b_size);
  2691. bio->bi_end_io = end_bio_bh_io_sync;
  2692. bio->bi_private = bh;
  2693. bio->bi_flags |= bio_flags;
  2694. /* Take care of bh's that straddle the end of the device */
  2695. guard_bio_eod(rw, bio);
  2696. if (buffer_meta(bh))
  2697. rw |= REQ_META;
  2698. if (buffer_prio(bh))
  2699. rw |= REQ_PRIO;
  2700. submit_bio(rw, bio);
  2701. return 0;
  2702. }
  2703. int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
  2704. {
  2705. return submit_bh_wbc(rw, bh, bio_flags, NULL);
  2706. }
  2707. EXPORT_SYMBOL_GPL(_submit_bh);
  2708. int submit_bh(int rw, struct buffer_head *bh)
  2709. {
  2710. return submit_bh_wbc(rw, bh, 0, NULL);
  2711. }
  2712. EXPORT_SYMBOL(submit_bh);
  2713. /**
  2714. * ll_rw_block: low-level access to block devices (DEPRECATED)
  2715. * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  2716. * @nr: number of &struct buffer_heads in the array
  2717. * @bhs: array of pointers to &struct buffer_head
  2718. *
  2719. * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  2720. * requests an I/O operation on them, either a %READ or a %WRITE. The third
  2721. * %READA option is described in the documentation for generic_make_request()
  2722. * which ll_rw_block() calls.
  2723. *
  2724. * This function drops any buffer that it cannot get a lock on (with the
  2725. * BH_Lock state bit), any buffer that appears to be clean when doing a write
2726. * request, and any buffer that appears to be up-to-date when doing a read
2727. * request. Further, it marks as clean any buffers that are processed for
  2728. * writing (the buffer cache won't assume that they are actually clean
  2729. * until the buffer gets unlocked).
  2730. *
2731. * ll_rw_block sets b_end_io to a simple completion handler that marks
  2732. * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
  2733. * any waiters.
  2734. *
  2735. * All of the buffers must be for the same device, and must also be a
  2736. * multiple of the current approved size for the device.
  2737. */
  2738. void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
  2739. {
  2740. int i;
  2741. for (i = 0; i < nr; i++) {
  2742. struct buffer_head *bh = bhs[i];
  2743. if (!trylock_buffer(bh))
  2744. continue;
  2745. if (rw == WRITE) {
  2746. if (test_clear_buffer_dirty(bh)) {
  2747. bh->b_end_io = end_buffer_write_sync;
  2748. get_bh(bh);
  2749. submit_bh(WRITE, bh);
  2750. continue;
  2751. }
  2752. } else {
  2753. if (!buffer_uptodate(bh)) {
  2754. bh->b_end_io = end_buffer_read_sync;
  2755. get_bh(bh);
  2756. submit_bh(rw, bh);
  2757. continue;
  2758. }
  2759. }
  2760. unlock_buffer(bh);
  2761. }
  2762. }
  2763. EXPORT_SYMBOL(ll_rw_block);
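/*
 * Editor's illustrative sketch (not part of the original buffer.c): a
 * batched read of two metadata blocks using ll_rw_block(), then waiting
 * for both.  ll_rw_block() skips buffers that are already uptodate or
 * that it cannot lock, which is why each buffer is re-checked after
 * wait_on_buffer().  The helper name is hypothetical.
 */
#if 0
static int examplefs_read_pair(struct super_block *sb,
			       sector_t a, sector_t b)
{
	struct buffer_head *bhs[2];
	int i, err = 0;

	bhs[0] = sb_getblk(sb, a);
	bhs[1] = sb_getblk(sb, b);
	if (!bhs[0] || !bhs[1]) {
		err = -ENOMEM;
		goto out;
	}
	ll_rw_block(READ, 2, bhs);
	for (i = 0; i < 2; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
out:
	brelse(bhs[0]);		/* brelse() tolerates NULL */
	brelse(bhs[1]);
	return err;
}
#endif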
  2764. void write_dirty_buffer(struct buffer_head *bh, int rw)
  2765. {
  2766. lock_buffer(bh);
  2767. if (!test_clear_buffer_dirty(bh)) {
  2768. unlock_buffer(bh);
  2769. return;
  2770. }
  2771. bh->b_end_io = end_buffer_write_sync;
  2772. get_bh(bh);
  2773. submit_bh(rw, bh);
  2774. }
  2775. EXPORT_SYMBOL(write_dirty_buffer);
  2776. /*
  2777. * For a data-integrity writeout, we need to wait upon any in-progress I/O
  2778. * and then start new I/O and then wait upon it. The caller must have a ref on
  2779. * the buffer_head.
  2780. */
  2781. int __sync_dirty_buffer(struct buffer_head *bh, int rw)
  2782. {
  2783. int ret = 0;
  2784. WARN_ON(atomic_read(&bh->b_count) < 1);
  2785. lock_buffer(bh);
  2786. if (test_clear_buffer_dirty(bh)) {
  2787. get_bh(bh);
  2788. bh->b_end_io = end_buffer_write_sync;
  2789. ret = submit_bh(rw, bh);
  2790. wait_on_buffer(bh);
  2791. if (!ret && !buffer_uptodate(bh))
  2792. ret = -EIO;
  2793. } else {
  2794. unlock_buffer(bh);
  2795. }
  2796. return ret;
  2797. }
  2798. EXPORT_SYMBOL(__sync_dirty_buffer);
  2799. int sync_dirty_buffer(struct buffer_head *bh)
  2800. {
  2801. return __sync_dirty_buffer(bh, WRITE_SYNC);
  2802. }
  2803. EXPORT_SYMBOL(sync_dirty_buffer);
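/*
 * Editor's illustrative sketch (not part of the original buffer.c): the
 * classic data-integrity update of a metadata buffer - dirty it, then
 * write it synchronously with sync_dirty_buffer(), which waits for the
 * I/O and reports -EIO on failure.  The helper name is hypothetical and
 * the caller is assumed to hold a reference on @sbh.
 */
#if 0
static int examplefs_commit_super(struct buffer_head *sbh)
{
	/* ... update the on-disk structure through sbh->b_data ... */
	mark_buffer_dirty(sbh);
	return sync_dirty_buffer(sbh);
}
#endif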
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
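/*
 * Illustrative sketch (editor's addition): the common pattern is for a
 * filesystem's ->releasepage() hook to fall back to try_to_free_buffers()
 * once it has no further interest in the page's buffers.  The hook name is
 * hypothetical.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!page_has_buffers(page))
		return 1;			/* nothing attached; page can go */
	return try_to_free_buffers(page);	/* 1 only if every buffer was freed */
}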
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep __read_mostly;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static unsigned long max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		preempt_disable();
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date; otherwise return false with
 * the buffer locked.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
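/*
 * Illustrative sketch (editor's addition): bh_uptodate_or_lock() and
 * bh_submit_read() are meant to be used as a pair when a caller needs a
 * block's contents in memory before it can proceed.  The helper name is
 * hypothetical.
 */
static int example_read_block(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up-to-date; buffer not locked */
	return bh_submit_read(bh);	/* buffer is locked: read it and wait */
}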
void __init buffer_init(void)
{
	unsigned long nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
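/*
 * Worked example (editor's addition; the figures are illustrative only):
 * with 4 KiB pages and a struct buffer_head of roughly 100 bytes, each page
 * holds about 40 buffer heads.  A machine with 1,000,000 free buffer pages
 * would therefore end up with max_buffer_heads of about 100,000 * 40, i.e.
 * roughly 4,000,000 buffer heads before buffer_heads_over_limit is set.
 */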