xfs_buf.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include <linux/stddef.h>
  8. #include <linux/errno.h>
  9. #include <linux/gfp.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/init.h>
  12. #include <linux/vmalloc.h>
  13. #include <linux/bio.h>
  14. #include <linux/sysctl.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/workqueue.h>
  17. #include <linux/percpu.h>
  18. #include <linux/blkdev.h>
  19. #include <linux/hash.h>
  20. #include <linux/kthread.h>
  21. #include <linux/migrate.h>
  22. #include <linux/backing-dev.h>
  23. #include <linux/freezer.h>
  24. #include "xfs_format.h"
  25. #include "xfs_log_format.h"
  26. #include "xfs_trans_resv.h"
  27. #include "xfs_sb.h"
  28. #include "xfs_mount.h"
  29. #include "xfs_trace.h"
  30. #include "xfs_log.h"
  31. #include "xfs_errortag.h"
  32. #include "xfs_error.h"
  33. static kmem_zone_t *xfs_buf_zone;
  34. #ifdef XFS_BUF_LOCK_TRACKING
  35. # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
  36. # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
  37. # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
  38. #else
  39. # define XB_SET_OWNER(bp) do { } while (0)
  40. # define XB_CLEAR_OWNER(bp) do { } while (0)
  41. # define XB_GET_OWNER(bp) do { } while (0)
  42. #endif
  43. #define xb_to_gfp(flags) \
  44. ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  45. static inline int
  46. xfs_buf_is_vmapped(
  47. struct xfs_buf *bp)
  48. {
  49. /*
  50. * Return true if the buffer is vmapped.
  51. *
  52. * b_addr is null if the buffer is not mapped, but the code is clever
  53. * enough to know it doesn't have to map a single page, so the check has
  54. * to be both for b_addr and bp->b_page_count > 1.
  55. */
  56. return bp->b_addr && bp->b_page_count > 1;
  57. }
  58. static inline int
  59. xfs_buf_vmap_len(
  60. struct xfs_buf *bp)
  61. {
  62. return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  63. }
  64. /*
  65. * Bump the I/O in flight count on the buftarg if we haven't yet done so for
  66. * this buffer. The count is incremented once per buffer (per hold cycle)
  67. * because the corresponding decrement is deferred to buffer release. Buffers
  68. * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
  69. * tracking adds unnecessary overhead. This is used for synchronization purposes
  70. * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
  71. * in-flight buffers.
  72. *
  73. * Buffers that are never released (e.g., superblock, iclog buffers) must set
  74. * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
  75. * never reaches zero and unmount hangs indefinitely.
  76. */
  77. static inline void
  78. xfs_buf_ioacct_inc(
  79. struct xfs_buf *bp)
  80. {
  81. if (bp->b_flags & XBF_NO_IOACCT)
  82. return;
  83. ASSERT(bp->b_flags & XBF_ASYNC);
  84. spin_lock(&bp->b_lock);
  85. if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
  86. bp->b_state |= XFS_BSTATE_IN_FLIGHT;
  87. percpu_counter_inc(&bp->b_target->bt_io_count);
  88. }
  89. spin_unlock(&bp->b_lock);
  90. }
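/*
 * Example (illustrative sketch only, not taken from this file): a buffer
 * that is held for the lifetime of the mount and therefore never released
 * must opt out of I/O accounting before submission, otherwise bt_io_count
 * never drains and xfs_wait_buftarg() waits forever at unmount:
 *
 *	bp->b_flags |= XBF_NO_IOACCT;
 *	...
 *	bp->b_flags |= XBF_ASYNC;
 *	xfs_buf_submit(bp);
 */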
  91. /*
  92. * Clear the in-flight state on a buffer about to be released to the LRU or
  93. * freed and unaccount from the buftarg.
  94. */
  95. static inline void
  96. __xfs_buf_ioacct_dec(
  97. struct xfs_buf *bp)
  98. {
  99. lockdep_assert_held(&bp->b_lock);
  100. if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
  101. bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
  102. percpu_counter_dec(&bp->b_target->bt_io_count);
  103. }
  104. }
  105. static inline void
  106. xfs_buf_ioacct_dec(
  107. struct xfs_buf *bp)
  108. {
  109. spin_lock(&bp->b_lock);
  110. __xfs_buf_ioacct_dec(bp);
  111. spin_unlock(&bp->b_lock);
  112. }
  113. /*
  114. * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  115. * b_lru_ref count so that the buffer is freed immediately when the buffer
  116. * reference count falls to zero. If the buffer is already on the LRU, we need
  117. * to remove the reference that LRU holds on the buffer.
  118. *
  119. * This prevents build-up of stale buffers on the LRU.
  120. */
  121. void
  122. xfs_buf_stale(
  123. struct xfs_buf *bp)
  124. {
  125. ASSERT(xfs_buf_islocked(bp));
  126. bp->b_flags |= XBF_STALE;
  127. /*
  128. * Clear the delwri status so that a delwri queue walker will not
  129. * flush this buffer to disk now that it is stale. The delwri queue has
  130. * a reference to the buffer, so this is safe to do.
  131. */
  132. bp->b_flags &= ~_XBF_DELWRI_Q;
  133. /*
  134. * Once the buffer is marked stale and unlocked, a subsequent lookup
  135. * could reset b_flags. There is no guarantee that the buffer is
  136. * unaccounted (released to LRU) before that occurs. Drop in-flight
  137. * status now to preserve accounting consistency.
  138. */
  139. spin_lock(&bp->b_lock);
  140. __xfs_buf_ioacct_dec(bp);
  141. atomic_set(&bp->b_lru_ref, 0);
  142. if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
  143. (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
  144. atomic_dec(&bp->b_hold);
  145. ASSERT(atomic_read(&bp->b_hold) >= 1);
  146. spin_unlock(&bp->b_lock);
  147. }
  148. static int
  149. xfs_buf_get_maps(
  150. struct xfs_buf *bp,
  151. int map_count)
  152. {
  153. ASSERT(bp->b_maps == NULL);
  154. bp->b_map_count = map_count;
  155. if (map_count == 1) {
  156. bp->b_maps = &bp->__b_map;
  157. return 0;
  158. }
  159. bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
  160. KM_NOFS);
  161. if (!bp->b_maps)
  162. return -ENOMEM;
  163. return 0;
  164. }
  165. /*
  166. * Frees b_maps if it was allocated separately from the embedded map.
  167. */
  168. static void
  169. xfs_buf_free_maps(
  170. struct xfs_buf *bp)
  171. {
  172. if (bp->b_maps != &bp->__b_map) {
  173. kmem_free(bp->b_maps);
  174. bp->b_maps = NULL;
  175. }
  176. }
  177. struct xfs_buf *
  178. _xfs_buf_alloc(
  179. struct xfs_buftarg *target,
  180. struct xfs_buf_map *map,
  181. int nmaps,
  182. xfs_buf_flags_t flags)
  183. {
  184. struct xfs_buf *bp;
  185. int error;
  186. int i;
  187. bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
  188. if (unlikely(!bp))
  189. return NULL;
  190. /*
  191. * We don't want certain flags to appear in b_flags unless they are
  192. * specifically set by later operations on the buffer.
  193. */
  194. flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
  195. atomic_set(&bp->b_hold, 1);
  196. atomic_set(&bp->b_lru_ref, 1);
  197. init_completion(&bp->b_iowait);
  198. INIT_LIST_HEAD(&bp->b_lru);
  199. INIT_LIST_HEAD(&bp->b_list);
  200. INIT_LIST_HEAD(&bp->b_li_list);
  201. sema_init(&bp->b_sema, 0); /* held, no waiters */
  202. spin_lock_init(&bp->b_lock);
  203. XB_SET_OWNER(bp);
  204. bp->b_target = target;
  205. bp->b_flags = flags;
  206. /*
  207. * Set length and io_length to the same value initially.
  208. * I/O routines should use io_length, which will be the same in
  209. * most cases but may be reset (e.g. XFS recovery).
  210. */
  211. error = xfs_buf_get_maps(bp, nmaps);
  212. if (error) {
  213. kmem_zone_free(xfs_buf_zone, bp);
  214. return NULL;
  215. }
  216. bp->b_bn = map[0].bm_bn;
  217. bp->b_length = 0;
  218. for (i = 0; i < nmaps; i++) {
  219. bp->b_maps[i].bm_bn = map[i].bm_bn;
  220. bp->b_maps[i].bm_len = map[i].bm_len;
  221. bp->b_length += map[i].bm_len;
  222. }
  223. bp->b_io_length = bp->b_length;
  224. atomic_set(&bp->b_pin_count, 0);
  225. init_waitqueue_head(&bp->b_waiters);
  226. XFS_STATS_INC(target->bt_mount, xb_create);
  227. trace_xfs_buf_init(bp, _RET_IP_);
  228. return bp;
  229. }
  230. /*
  231. * Allocate a page array capable of holding a specified number
  232. * of pages, and point the page buf at it.
  233. */
  234. STATIC int
  235. _xfs_buf_get_pages(
  236. xfs_buf_t *bp,
  237. int page_count)
  238. {
  239. /* Make sure that we have a page list */
  240. if (bp->b_pages == NULL) {
  241. bp->b_page_count = page_count;
  242. if (page_count <= XB_PAGES) {
  243. bp->b_pages = bp->b_page_array;
  244. } else {
  245. bp->b_pages = kmem_alloc(sizeof(struct page *) *
  246. page_count, KM_NOFS);
  247. if (bp->b_pages == NULL)
  248. return -ENOMEM;
  249. }
  250. memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
  251. }
  252. return 0;
  253. }
  254. /*
  255. * Frees b_pages if it was allocated.
  256. */
  257. STATIC void
  258. _xfs_buf_free_pages(
  259. xfs_buf_t *bp)
  260. {
  261. if (bp->b_pages != bp->b_page_array) {
  262. kmem_free(bp->b_pages);
  263. bp->b_pages = NULL;
  264. }
  265. }
  266. /*
  267. * Releases the specified buffer.
  268. *
  269. * The modification state of any associated pages is left unchanged.
  270. * The buffer must not be on any hash - use xfs_buf_rele instead for
  271. * hashed and refcounted buffers
  272. */
  273. void
  274. xfs_buf_free(
  275. xfs_buf_t *bp)
  276. {
  277. trace_xfs_buf_free(bp, _RET_IP_);
  278. ASSERT(list_empty(&bp->b_lru));
  279. if (bp->b_flags & _XBF_PAGES) {
  280. uint i;
  281. if (xfs_buf_is_vmapped(bp))
  282. vm_unmap_ram(bp->b_addr - bp->b_offset,
  283. bp->b_page_count);
  284. for (i = 0; i < bp->b_page_count; i++) {
  285. struct page *page = bp->b_pages[i];
  286. __free_page(page);
  287. }
  288. } else if (bp->b_flags & _XBF_KMEM)
  289. kmem_free(bp->b_addr);
  290. _xfs_buf_free_pages(bp);
  291. xfs_buf_free_maps(bp);
  292. kmem_zone_free(xfs_buf_zone, bp);
  293. }
  294. /*
  295. * Allocates all the pages for the buffer in question and builds its page list.
  296. */
  297. STATIC int
  298. xfs_buf_allocate_memory(
  299. xfs_buf_t *bp,
  300. uint flags)
  301. {
  302. size_t size;
  303. size_t nbytes, offset;
  304. gfp_t gfp_mask = xb_to_gfp(flags);
  305. unsigned short page_count, i;
  306. xfs_off_t start, end;
  307. int error;
  308. /*
  309. * for buffers that are contained within a single page, just allocate
  310. * the memory from the heap - there's no need for the complexity of
  311. * page arrays to keep allocation down to order 0.
  312. */
  313. size = BBTOB(bp->b_length);
  314. if (size < PAGE_SIZE) {
  315. bp->b_addr = kmem_alloc(size, KM_NOFS);
  316. if (!bp->b_addr) {
  317. /* low memory - use alloc_page loop instead */
  318. goto use_alloc_page;
  319. }
  320. if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
  321. ((unsigned long)bp->b_addr & PAGE_MASK)) {
  322. /* b_addr spans two pages - use alloc_page instead */
  323. kmem_free(bp->b_addr);
  324. bp->b_addr = NULL;
  325. goto use_alloc_page;
  326. }
  327. bp->b_offset = offset_in_page(bp->b_addr);
  328. bp->b_pages = bp->b_page_array;
  329. bp->b_pages[0] = virt_to_page(bp->b_addr);
  330. bp->b_page_count = 1;
  331. bp->b_flags |= _XBF_KMEM;
  332. return 0;
  333. }
  334. use_alloc_page:
  335. start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
  336. end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
  337. >> PAGE_SHIFT;
  338. page_count = end - start;
  339. error = _xfs_buf_get_pages(bp, page_count);
  340. if (unlikely(error))
  341. return error;
  342. offset = bp->b_offset;
  343. bp->b_flags |= _XBF_PAGES;
  344. for (i = 0; i < bp->b_page_count; i++) {
  345. struct page *page;
  346. uint retries = 0;
  347. retry:
  348. page = alloc_page(gfp_mask);
  349. if (unlikely(page == NULL)) {
  350. if (flags & XBF_READ_AHEAD) {
  351. bp->b_page_count = i;
  352. error = -ENOMEM;
  353. goto out_free_pages;
  354. }
  355. /*
  356. * This could deadlock.
  357. *
  358. * But until all the XFS lowlevel code is revamped to
  359. * handle buffer allocation failures we can't do much.
  360. */
  361. if (!(++retries % 100))
  362. xfs_err(NULL,
  363. "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
  364. current->comm, current->pid,
  365. __func__, gfp_mask);
  366. XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
  367. congestion_wait(BLK_RW_ASYNC, HZ/50);
  368. goto retry;
  369. }
  370. XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
  371. nbytes = min_t(size_t, size, PAGE_SIZE - offset);
  372. size -= nbytes;
  373. bp->b_pages[i] = page;
  374. offset = 0;
  375. }
  376. return 0;
  377. out_free_pages:
  378. for (i = 0; i < bp->b_page_count; i++)
  379. __free_page(bp->b_pages[i]);
  380. bp->b_flags &= ~_XBF_PAGES;
  381. return error;
  382. }
  383. /*
  384. * Map buffer into kernel address-space if necessary.
  385. */
  386. STATIC int
  387. _xfs_buf_map_pages(
  388. xfs_buf_t *bp,
  389. uint flags)
  390. {
  391. ASSERT(bp->b_flags & _XBF_PAGES);
  392. if (bp->b_page_count == 1) {
  393. /* A single page buffer is always mappable */
  394. bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
  395. } else if (flags & XBF_UNMAPPED) {
  396. bp->b_addr = NULL;
  397. } else {
  398. int retried = 0;
  399. unsigned nofs_flag;
  400. /*
  401. * vm_map_ram() will allocate auxiliary structures (e.g.
  402. * pagetables) with GFP_KERNEL, yet we are likely to be under
  403. * GFP_NOFS context here. Hence we need to tell memory reclaim
  404. * that we are in such a context via PF_MEMALLOC_NOFS to prevent
  405. * memory reclaim re-entering the filesystem here and
  406. * potentially deadlocking.
  407. */
  408. nofs_flag = memalloc_nofs_save();
  409. do {
  410. bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
  411. -1, PAGE_KERNEL);
  412. if (bp->b_addr)
  413. break;
  414. vm_unmap_aliases();
  415. } while (retried++ <= 1);
  416. memalloc_nofs_restore(nofs_flag);
  417. if (!bp->b_addr)
  418. return -ENOMEM;
  419. bp->b_addr += bp->b_offset;
  420. }
  421. return 0;
  422. }
  423. /*
  424. * Finding and Reading Buffers
  425. */
  426. static int
  427. _xfs_buf_obj_cmp(
  428. struct rhashtable_compare_arg *arg,
  429. const void *obj)
  430. {
  431. const struct xfs_buf_map *map = arg->key;
  432. const struct xfs_buf *bp = obj;
  433. /*
  434. * The key hashing in the lookup path depends on the key being the
  435. * first element of the compare_arg, make sure to assert this.
  436. */
  437. BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
  438. if (bp->b_bn != map->bm_bn)
  439. return 1;
  440. if (unlikely(bp->b_length != map->bm_len)) {
  441. /*
  442. * found a block number match. If the range doesn't
  443. * match, the only way this is allowed is if the buffer
  444. * in the cache is stale and the transaction that made
  445. * it stale has not yet committed. i.e. we are
  446. * reallocating a busy extent. Skip this buffer and
  447. * continue searching for an exact match.
  448. */
  449. ASSERT(bp->b_flags & XBF_STALE);
  450. return 1;
  451. }
  452. return 0;
  453. }
  454. static const struct rhashtable_params xfs_buf_hash_params = {
  455. .min_size = 32, /* empty AGs have minimal footprint */
  456. .nelem_hint = 16,
  457. .key_len = sizeof(xfs_daddr_t),
  458. .key_offset = offsetof(struct xfs_buf, b_bn),
  459. .head_offset = offsetof(struct xfs_buf, b_rhash_head),
  460. .automatic_shrinking = true,
  461. .obj_cmpfn = _xfs_buf_obj_cmp,
  462. };
  463. int
  464. xfs_buf_hash_init(
  465. struct xfs_perag *pag)
  466. {
  467. spin_lock_init(&pag->pag_buf_lock);
  468. return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
  469. }
  470. void
  471. xfs_buf_hash_destroy(
  472. struct xfs_perag *pag)
  473. {
  474. rhashtable_destroy(&pag->pag_buf_hash);
  475. }
  476. /*
  477. * Look up a buffer in the buffer cache and return it referenced and locked
  478. * in @found_bp.
  479. *
  480. * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
  481. * cache.
  482. *
  483. * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
  484. * -EAGAIN if we fail to lock it.
  485. *
  486. * Return values are:
  487. * -EFSCORRUPTED if have been supplied with an invalid address
  488. * -EAGAIN on trylock failure
  489. * -ENOENT if we fail to find a match and @new_bp was NULL
  490. * 0, with @found_bp:
  491. * - @new_bp if we inserted it into the cache
  492. * - the buffer we found and locked.
  493. */
  494. static int
  495. xfs_buf_find(
  496. struct xfs_buftarg *btp,
  497. struct xfs_buf_map *map,
  498. int nmaps,
  499. xfs_buf_flags_t flags,
  500. struct xfs_buf *new_bp,
  501. struct xfs_buf **found_bp)
  502. {
  503. struct xfs_perag *pag;
  504. xfs_buf_t *bp;
  505. struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
  506. xfs_daddr_t eofs;
  507. int i;
  508. *found_bp = NULL;
  509. for (i = 0; i < nmaps; i++)
  510. cmap.bm_len += map[i].bm_len;
  511. /* Check for IOs smaller than the sector size / not sector aligned */
  512. ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
  513. ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
  514. /*
  515. * Corrupted block numbers can get through to here, unfortunately, so we
  516. * have to check that the buffer falls within the filesystem bounds.
  517. */
  518. eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
  519. if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
  520. xfs_alert(btp->bt_mount,
  521. "%s: daddr 0x%llx out of range, EOFS 0x%llx",
  522. __func__, cmap.bm_bn, eofs);
  523. WARN_ON(1);
  524. return -EFSCORRUPTED;
  525. }
  526. pag = xfs_perag_get(btp->bt_mount,
  527. xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
  528. spin_lock(&pag->pag_buf_lock);
  529. bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
  530. xfs_buf_hash_params);
  531. if (bp) {
  532. atomic_inc(&bp->b_hold);
  533. goto found;
  534. }
  535. /* No match found */
  536. if (!new_bp) {
  537. XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
  538. spin_unlock(&pag->pag_buf_lock);
  539. xfs_perag_put(pag);
  540. return -ENOENT;
  541. }
  542. /* the buffer keeps the perag reference until it is freed */
  543. new_bp->b_pag = pag;
  544. rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
  545. xfs_buf_hash_params);
  546. spin_unlock(&pag->pag_buf_lock);
  547. *found_bp = new_bp;
  548. return 0;
  549. found:
  550. spin_unlock(&pag->pag_buf_lock);
  551. xfs_perag_put(pag);
  552. if (!xfs_buf_trylock(bp)) {
  553. if (flags & XBF_TRYLOCK) {
  554. xfs_buf_rele(bp);
  555. XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
  556. return -EAGAIN;
  557. }
  558. xfs_buf_lock(bp);
  559. XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
  560. }
  561. /*
  562. * if the buffer is stale, clear all the external state associated with
  563. * it. We need to keep flags such as how we allocated the buffer memory
  564. * intact here.
  565. */
  566. if (bp->b_flags & XBF_STALE) {
  567. ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
  568. ASSERT(bp->b_iodone == NULL);
  569. bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
  570. bp->b_ops = NULL;
  571. }
  572. trace_xfs_buf_find(bp, flags, _RET_IP_);
  573. XFS_STATS_INC(btp->bt_mount, xb_get_locked);
  574. *found_bp = bp;
  575. return 0;
  576. }
  577. struct xfs_buf *
  578. xfs_buf_incore(
  579. struct xfs_buftarg *target,
  580. xfs_daddr_t blkno,
  581. size_t numblks,
  582. xfs_buf_flags_t flags)
  583. {
  584. struct xfs_buf *bp;
  585. int error;
  586. DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
  587. error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
  588. if (error)
  589. return NULL;
  590. return bp;
  591. }
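/*
 * Example usage (sketch only): peek at the cache without inserting a new
 * buffer, using a trylock so the caller never blocks on a busy buffer:
 *
 *	bp = xfs_buf_incore(target, blkno, numblks, XBF_TRYLOCK);
 *	if (bp) {
 *		... inspect the cached buffer ...
 *		xfs_buf_relse(bp);
 *	}
 */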
  592. /*
  593. * Assembles a buffer covering the specified range. The code is optimised for
  594. * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  595. * more hits than misses.
  596. */
  597. struct xfs_buf *
  598. xfs_buf_get_map(
  599. struct xfs_buftarg *target,
  600. struct xfs_buf_map *map,
  601. int nmaps,
  602. xfs_buf_flags_t flags)
  603. {
  604. struct xfs_buf *bp;
  605. struct xfs_buf *new_bp;
  606. int error = 0;
  607. error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
  608. switch (error) {
  609. case 0:
  610. /* cache hit */
  611. goto found;
  612. case -EAGAIN:
  613. /* cache hit, trylock failure, caller handles failure */
  614. ASSERT(flags & XBF_TRYLOCK);
  615. return NULL;
  616. case -ENOENT:
  617. /* cache miss, go for insert */
  618. break;
  619. case -EFSCORRUPTED:
  620. default:
  621. /*
  622. * None of the higher layers understand failure types
  623. * yet, so return NULL to signal a fatal lookup error.
  624. */
  625. return NULL;
  626. }
  627. new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
  628. if (unlikely(!new_bp))
  629. return NULL;
  630. error = xfs_buf_allocate_memory(new_bp, flags);
  631. if (error) {
  632. xfs_buf_free(new_bp);
  633. return NULL;
  634. }
  635. error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
  636. if (error) {
  637. xfs_buf_free(new_bp);
  638. return NULL;
  639. }
  640. if (bp != new_bp)
  641. xfs_buf_free(new_bp);
  642. found:
  643. if (!bp->b_addr) {
  644. error = _xfs_buf_map_pages(bp, flags);
  645. if (unlikely(error)) {
  646. xfs_warn(target->bt_mount,
  647. "%s: failed to map pagesn", __func__);
  648. xfs_buf_relse(bp);
  649. return NULL;
  650. }
  651. }
  652. /*
  653. * Clear b_error if this is a lookup from a caller that doesn't expect
  654. * valid data to be found in the buffer.
  655. */
  656. if (!(flags & XBF_READ))
  657. xfs_buf_ioerror(bp, 0);
  658. XFS_STATS_INC(target->bt_mount, xb_get);
  659. trace_xfs_buf_get(bp, flags, _RET_IP_);
  660. return bp;
  661. }
  662. STATIC int
  663. _xfs_buf_read(
  664. xfs_buf_t *bp,
  665. xfs_buf_flags_t flags)
  666. {
  667. ASSERT(!(flags & XBF_WRITE));
  668. ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
  669. bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
  670. bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
  671. if (flags & XBF_ASYNC) {
  672. xfs_buf_submit(bp);
  673. return 0;
  674. }
  675. return xfs_buf_submit_wait(bp);
  676. }
  677. xfs_buf_t *
  678. xfs_buf_read_map(
  679. struct xfs_buftarg *target,
  680. struct xfs_buf_map *map,
  681. int nmaps,
  682. xfs_buf_flags_t flags,
  683. const struct xfs_buf_ops *ops)
  684. {
  685. struct xfs_buf *bp;
  686. flags |= XBF_READ;
  687. bp = xfs_buf_get_map(target, map, nmaps, flags);
  688. if (bp) {
  689. trace_xfs_buf_read(bp, flags, _RET_IP_);
  690. if (!(bp->b_flags & XBF_DONE)) {
  691. XFS_STATS_INC(target->bt_mount, xb_get_read);
  692. bp->b_ops = ops;
  693. _xfs_buf_read(bp, flags);
  694. } else if (flags & XBF_ASYNC) {
  695. /*
  696. * Read ahead call which is already satisfied,
  697. * drop the buffer
  698. */
  699. xfs_buf_relse(bp);
  700. return NULL;
  701. } else {
  702. /* We do not want read in the flags */
  703. bp->b_flags &= ~XBF_READ;
  704. }
  705. }
  706. return bp;
  707. }
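/*
 * Example usage (sketch only; "my_buf_ops" is a placeholder for the
 * caller's verifier): a synchronous metadata read followed by the usual
 * error check and release:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *
 *	bp = xfs_buf_read_map(target, &map, 1, 0, &my_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */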
  708. /*
  709. * If we are not low on memory then do the readahead in a deadlock
  710. * safe manner.
  711. */
  712. void
  713. xfs_buf_readahead_map(
  714. struct xfs_buftarg *target,
  715. struct xfs_buf_map *map,
  716. int nmaps,
  717. const struct xfs_buf_ops *ops)
  718. {
  719. if (bdi_read_congested(target->bt_bdev->bd_bdi))
  720. return;
  721. xfs_buf_read_map(target, map, nmaps,
  722. XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
  723. }
  724. /*
  725. * Read an uncached buffer from disk. Allocates and returns a locked
  726. * buffer containing the disk contents or nothing.
  727. */
  728. int
  729. xfs_buf_read_uncached(
  730. struct xfs_buftarg *target,
  731. xfs_daddr_t daddr,
  732. size_t numblks,
  733. int flags,
  734. struct xfs_buf **bpp,
  735. const struct xfs_buf_ops *ops)
  736. {
  737. struct xfs_buf *bp;
  738. *bpp = NULL;
  739. bp = xfs_buf_get_uncached(target, numblks, flags);
  740. if (!bp)
  741. return -ENOMEM;
  742. /* set up the buffer for a read IO */
  743. ASSERT(bp->b_map_count == 1);
  744. bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
  745. bp->b_maps[0].bm_bn = daddr;
  746. bp->b_flags |= XBF_READ;
  747. bp->b_ops = ops;
  748. xfs_buf_submit_wait(bp);
  749. if (bp->b_error) {
  750. int error = bp->b_error;
  751. xfs_buf_relse(bp);
  752. return error;
  753. }
  754. *bpp = bp;
  755. return 0;
  756. }
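/*
 * Example usage (sketch only): read a range that never enters the buffer
 * cache, e.g. when probing the device during mount:
 *
 *	error = xfs_buf_read_uncached(target, daddr, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... consume bp->b_addr ...
 *	xfs_buf_relse(bp);
 */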
  757. /*
  758. * Return a buffer that was allocated as an empty buffer and associated to
  759. * external memory via xfs_buf_associate_memory() back to its empty state.
  760. */
  761. void
  762. xfs_buf_set_empty(
  763. struct xfs_buf *bp,
  764. size_t numblks)
  765. {
  766. if (bp->b_pages)
  767. _xfs_buf_free_pages(bp);
  768. bp->b_pages = NULL;
  769. bp->b_page_count = 0;
  770. bp->b_addr = NULL;
  771. bp->b_length = numblks;
  772. bp->b_io_length = numblks;
  773. ASSERT(bp->b_map_count == 1);
  774. bp->b_bn = XFS_BUF_DADDR_NULL;
  775. bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
  776. bp->b_maps[0].bm_len = bp->b_length;
  777. }
  778. static inline struct page *
  779. mem_to_page(
  780. void *addr)
  781. {
  782. if ((!is_vmalloc_addr(addr))) {
  783. return virt_to_page(addr);
  784. } else {
  785. return vmalloc_to_page(addr);
  786. }
  787. }
  788. int
  789. xfs_buf_associate_memory(
  790. xfs_buf_t *bp,
  791. void *mem,
  792. size_t len)
  793. {
  794. int rval;
  795. int i = 0;
  796. unsigned long pageaddr;
  797. unsigned long offset;
  798. size_t buflen;
  799. int page_count;
  800. pageaddr = (unsigned long)mem & PAGE_MASK;
  801. offset = (unsigned long)mem - pageaddr;
  802. buflen = PAGE_ALIGN(len + offset);
  803. page_count = buflen >> PAGE_SHIFT;
  804. /* Free any previous set of page pointers */
  805. if (bp->b_pages)
  806. _xfs_buf_free_pages(bp);
  807. bp->b_pages = NULL;
  808. bp->b_addr = mem;
  809. rval = _xfs_buf_get_pages(bp, page_count);
  810. if (rval)
  811. return rval;
  812. bp->b_offset = offset;
  813. for (i = 0; i < bp->b_page_count; i++) {
  814. bp->b_pages[i] = mem_to_page((void *)pageaddr);
  815. pageaddr += PAGE_SIZE;
  816. }
  817. bp->b_io_length = BTOBB(len);
  818. bp->b_length = BTOBB(buflen);
  819. return 0;
  820. }
  821. xfs_buf_t *
  822. xfs_buf_get_uncached(
  823. struct xfs_buftarg *target,
  824. size_t numblks,
  825. int flags)
  826. {
  827. unsigned long page_count;
  828. int error, i;
  829. struct xfs_buf *bp;
  830. DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
  831. /* flags might contain irrelevant bits, pass only what we care about */
  832. bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
  833. if (unlikely(bp == NULL))
  834. goto fail;
  835. page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
  836. error = _xfs_buf_get_pages(bp, page_count);
  837. if (error)
  838. goto fail_free_buf;
  839. for (i = 0; i < page_count; i++) {
  840. bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
  841. if (!bp->b_pages[i])
  842. goto fail_free_mem;
  843. }
  844. bp->b_flags |= _XBF_PAGES;
  845. error = _xfs_buf_map_pages(bp, 0);
  846. if (unlikely(error)) {
  847. xfs_warn(target->bt_mount,
  848. "%s: failed to map pages", __func__);
  849. goto fail_free_mem;
  850. }
  851. trace_xfs_buf_get_uncached(bp, _RET_IP_);
  852. return bp;
  853. fail_free_mem:
  854. while (--i >= 0)
  855. __free_page(bp->b_pages[i]);
  856. _xfs_buf_free_pages(bp);
  857. fail_free_buf:
  858. xfs_buf_free_maps(bp);
  859. kmem_zone_free(xfs_buf_zone, bp);
  860. fail:
  861. return NULL;
  862. }
  863. /*
  864. * Increment reference count on buffer, to hold the buffer concurrently
  865. * with another thread which may release (free) the buffer asynchronously.
  866. * Must hold the buffer already to call this function.
  867. */
  868. void
  869. xfs_buf_hold(
  870. xfs_buf_t *bp)
  871. {
  872. trace_xfs_buf_hold(bp, _RET_IP_);
  873. atomic_inc(&bp->b_hold);
  874. }
  875. /*
  876. * Release a hold on the specified buffer. If the hold count is 1, the buffer is
  877. * placed on LRU or freed (depending on b_lru_ref).
  878. */
  879. void
  880. xfs_buf_rele(
  881. xfs_buf_t *bp)
  882. {
  883. struct xfs_perag *pag = bp->b_pag;
  884. bool release;
  885. bool freebuf = false;
  886. trace_xfs_buf_rele(bp, _RET_IP_);
  887. if (!pag) {
  888. ASSERT(list_empty(&bp->b_lru));
  889. if (atomic_dec_and_test(&bp->b_hold)) {
  890. xfs_buf_ioacct_dec(bp);
  891. xfs_buf_free(bp);
  892. }
  893. return;
  894. }
  895. ASSERT(atomic_read(&bp->b_hold) > 0);
  896. release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
  897. spin_lock(&bp->b_lock);
  898. if (!release) {
  899. /*
  900. * Drop the in-flight state if the buffer is already on the LRU
  901. * and it holds the only reference. This is racy because we
  902. * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
  903. * ensures the decrement occurs only once per-buf.
  904. */
  905. if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
  906. __xfs_buf_ioacct_dec(bp);
  907. goto out_unlock;
  908. }
  909. /* the last reference has been dropped ... */
  910. __xfs_buf_ioacct_dec(bp);
  911. if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
  912. /*
  913. * If the buffer is added to the LRU take a new reference to the
  914. * buffer for the LRU and clear the (now stale) dispose list
  915. * state flag
  916. */
  917. if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
  918. bp->b_state &= ~XFS_BSTATE_DISPOSE;
  919. atomic_inc(&bp->b_hold);
  920. }
  921. spin_unlock(&pag->pag_buf_lock);
  922. } else {
  923. /*
  924. * most of the time buffers will already be removed from the
  925. * LRU, so optimise that case by checking for the
  926. * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
  927. * was on was the disposal list
  928. */
  929. if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
  930. list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
  931. } else {
  932. ASSERT(list_empty(&bp->b_lru));
  933. }
  934. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  935. rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
  936. xfs_buf_hash_params);
  937. spin_unlock(&pag->pag_buf_lock);
  938. xfs_perag_put(pag);
  939. freebuf = true;
  940. }
  941. out_unlock:
  942. spin_unlock(&bp->b_lock);
  943. if (freebuf)
  944. xfs_buf_free(bp);
  945. }
  946. /*
  947. * Lock a buffer object, if it is not already locked.
  948. *
  949. * If we come across a stale, pinned, locked buffer, we know that we are
  950. * being asked to lock a buffer that has been reallocated. Because it is
  951. * pinned, we know that the log has not been pushed to disk and hence it
  952. * will still be locked. Rather than continuing to have trylock attempts
  953. * fail until someone else pushes the log, push it ourselves before
  954. * returning. This means that the xfsaild will not get stuck trying
  955. * to push on stale inode buffers.
  956. */
  957. int
  958. xfs_buf_trylock(
  959. struct xfs_buf *bp)
  960. {
  961. int locked;
  962. locked = down_trylock(&bp->b_sema) == 0;
  963. if (locked) {
  964. XB_SET_OWNER(bp);
  965. trace_xfs_buf_trylock(bp, _RET_IP_);
  966. } else {
  967. trace_xfs_buf_trylock_fail(bp, _RET_IP_);
  968. }
  969. return locked;
  970. }
  971. /*
  972. * Lock a buffer object.
  973. *
  974. * If we come across a stale, pinned, locked buffer, we know that we
  975. * are being asked to lock a buffer that has been reallocated. Because
  976. * it is pinned, we know that the log has not been pushed to disk and
  977. * hence it will still be locked. Rather than sleeping until someone
  978. * else pushes the log, push it ourselves before trying to get the lock.
  979. */
  980. void
  981. xfs_buf_lock(
  982. struct xfs_buf *bp)
  983. {
  984. trace_xfs_buf_lock(bp, _RET_IP_);
  985. if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
  986. xfs_log_force(bp->b_target->bt_mount, 0);
  987. down(&bp->b_sema);
  988. XB_SET_OWNER(bp);
  989. trace_xfs_buf_lock_done(bp, _RET_IP_);
  990. }
  991. void
  992. xfs_buf_unlock(
  993. struct xfs_buf *bp)
  994. {
  995. ASSERT(xfs_buf_islocked(bp));
  996. XB_CLEAR_OWNER(bp);
  997. up(&bp->b_sema);
  998. trace_xfs_buf_unlock(bp, _RET_IP_);
  999. }
  1000. STATIC void
  1001. xfs_buf_wait_unpin(
  1002. xfs_buf_t *bp)
  1003. {
  1004. DECLARE_WAITQUEUE (wait, current);
  1005. if (atomic_read(&bp->b_pin_count) == 0)
  1006. return;
  1007. add_wait_queue(&bp->b_waiters, &wait);
  1008. for (;;) {
  1009. set_current_state(TASK_UNINTERRUPTIBLE);
  1010. if (atomic_read(&bp->b_pin_count) == 0)
  1011. break;
  1012. io_schedule();
  1013. }
  1014. remove_wait_queue(&bp->b_waiters, &wait);
  1015. set_current_state(TASK_RUNNING);
  1016. }
  1017. /*
  1018. * Buffer Utility Routines
  1019. */
  1020. void
  1021. xfs_buf_ioend(
  1022. struct xfs_buf *bp)
  1023. {
  1024. bool read = bp->b_flags & XBF_READ;
  1025. trace_xfs_buf_iodone(bp, _RET_IP_);
  1026. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
  1027. /*
  1028. * Pull in IO completion errors now. We are guaranteed to be running
  1029. * single threaded, so we don't need the lock to read b_io_error.
  1030. */
  1031. if (!bp->b_error && bp->b_io_error)
  1032. xfs_buf_ioerror(bp, bp->b_io_error);
  1033. /* Only validate buffers that were read without errors */
  1034. if (read && !bp->b_error && bp->b_ops) {
  1035. ASSERT(!bp->b_iodone);
  1036. bp->b_ops->verify_read(bp);
  1037. }
  1038. if (!bp->b_error)
  1039. bp->b_flags |= XBF_DONE;
  1040. if (bp->b_iodone)
  1041. (*(bp->b_iodone))(bp);
  1042. else if (bp->b_flags & XBF_ASYNC)
  1043. xfs_buf_relse(bp);
  1044. else
  1045. complete(&bp->b_iowait);
  1046. }
  1047. static void
  1048. xfs_buf_ioend_work(
  1049. struct work_struct *work)
  1050. {
  1051. struct xfs_buf *bp =
  1052. container_of(work, xfs_buf_t, b_ioend_work);
  1053. xfs_buf_ioend(bp);
  1054. }
  1055. static void
  1056. xfs_buf_ioend_async(
  1057. struct xfs_buf *bp)
  1058. {
  1059. INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
  1060. queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
  1061. }
  1062. void
  1063. __xfs_buf_ioerror(
  1064. xfs_buf_t *bp,
  1065. int error,
  1066. xfs_failaddr_t failaddr)
  1067. {
  1068. ASSERT(error <= 0 && error >= -1000);
  1069. bp->b_error = error;
  1070. trace_xfs_buf_ioerror(bp, error, failaddr);
  1071. }
  1072. void
  1073. xfs_buf_ioerror_alert(
  1074. struct xfs_buf *bp,
  1075. const char *func)
  1076. {
  1077. xfs_alert(bp->b_target->bt_mount,
  1078. "metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
  1079. func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
  1080. -bp->b_error);
  1081. }
  1082. int
  1083. xfs_bwrite(
  1084. struct xfs_buf *bp)
  1085. {
  1086. int error;
  1087. ASSERT(xfs_buf_islocked(bp));
  1088. bp->b_flags |= XBF_WRITE;
  1089. bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
  1090. XBF_WRITE_FAIL | XBF_DONE);
  1091. error = xfs_buf_submit_wait(bp);
  1092. if (error) {
  1093. xfs_force_shutdown(bp->b_target->bt_mount,
  1094. SHUTDOWN_META_IO_ERROR);
  1095. }
  1096. return error;
  1097. }
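/*
 * Example usage (sketch only): synchronously write out a locked, modified
 * buffer and release it. xfs_bwrite() forces a filesystem shutdown on
 * failure, so callers typically just propagate the error:
 *
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *	return error;
 */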
  1098. static void
  1099. xfs_buf_bio_end_io(
  1100. struct bio *bio)
  1101. {
  1102. struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
  1103. /*
  1104. * don't overwrite existing errors - otherwise we can lose errors on
  1105. * buffers that require multiple bios to complete.
  1106. */
  1107. if (bio->bi_status) {
  1108. int error = blk_status_to_errno(bio->bi_status);
  1109. cmpxchg(&bp->b_io_error, 0, error);
  1110. }
  1111. if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
  1112. invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
  1113. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  1114. xfs_buf_ioend_async(bp);
  1115. bio_put(bio);
  1116. }
  1117. static void
  1118. xfs_buf_ioapply_map(
  1119. struct xfs_buf *bp,
  1120. int map,
  1121. int *buf_offset,
  1122. int *count,
  1123. int op,
  1124. int op_flags)
  1125. {
  1126. int page_index;
  1127. int total_nr_pages = bp->b_page_count;
  1128. int nr_pages;
  1129. struct bio *bio;
  1130. sector_t sector = bp->b_maps[map].bm_bn;
  1131. int size;
  1132. int offset;
  1133. /* skip the pages in the buffer before the start offset */
  1134. page_index = 0;
  1135. offset = *buf_offset;
  1136. while (offset >= PAGE_SIZE) {
  1137. page_index++;
  1138. offset -= PAGE_SIZE;
  1139. }
  1140. /*
  1141. * Limit the IO size to the length of the current vector, and update the
  1142. * remaining IO count for the next time around.
  1143. */
  1144. size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
  1145. *count -= size;
  1146. *buf_offset += size;
  1147. next_chunk:
  1148. atomic_inc(&bp->b_io_remaining);
  1149. nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
  1150. bio = bio_alloc(GFP_NOIO, nr_pages);
  1151. bio_set_dev(bio, bp->b_target->bt_bdev);
  1152. bio->bi_iter.bi_sector = sector;
  1153. bio->bi_end_io = xfs_buf_bio_end_io;
  1154. bio->bi_private = bp;
  1155. bio_set_op_attrs(bio, op, op_flags);
  1156. for (; size && nr_pages; nr_pages--, page_index++) {
  1157. int rbytes, nbytes = PAGE_SIZE - offset;
  1158. if (nbytes > size)
  1159. nbytes = size;
  1160. rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
  1161. offset);
  1162. if (rbytes < nbytes)
  1163. break;
  1164. offset = 0;
  1165. sector += BTOBB(nbytes);
  1166. size -= nbytes;
  1167. total_nr_pages--;
  1168. }
  1169. if (likely(bio->bi_iter.bi_size)) {
  1170. if (xfs_buf_is_vmapped(bp)) {
  1171. flush_kernel_vmap_range(bp->b_addr,
  1172. xfs_buf_vmap_len(bp));
  1173. }
  1174. submit_bio(bio);
  1175. if (size)
  1176. goto next_chunk;
  1177. } else {
  1178. /*
  1179. * This is guaranteed not to be the last io reference count
  1180. * because the caller (xfs_buf_submit) holds a count itself.
  1181. */
  1182. atomic_dec(&bp->b_io_remaining);
  1183. xfs_buf_ioerror(bp, -EIO);
  1184. bio_put(bio);
  1185. }
  1186. }
  1187. STATIC void
  1188. _xfs_buf_ioapply(
  1189. struct xfs_buf *bp)
  1190. {
  1191. struct blk_plug plug;
  1192. int op;
  1193. int op_flags = 0;
  1194. int offset;
  1195. int size;
  1196. int i;
  1197. /*
  1198. * Make sure we capture only current IO errors rather than stale errors
  1199. * left over from previous use of the buffer (e.g. failed readahead).
  1200. */
  1201. bp->b_error = 0;
  1202. /*
  1203. * Initialize the I/O completion workqueue if we haven't yet or the
  1204. * submitter has not opted to specify a custom one.
  1205. */
  1206. if (!bp->b_ioend_wq)
  1207. bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
  1208. if (bp->b_flags & XBF_WRITE) {
  1209. op = REQ_OP_WRITE;
  1210. if (bp->b_flags & XBF_SYNCIO)
  1211. op_flags = REQ_SYNC;
  1212. if (bp->b_flags & XBF_FUA)
  1213. op_flags |= REQ_FUA;
  1214. if (bp->b_flags & XBF_FLUSH)
  1215. op_flags |= REQ_PREFLUSH;
  1216. /*
  1217. * Run the write verifier callback function if it exists. If
  1218. * this function fails it will mark the buffer with an error and
  1219. * the IO should not be dispatched.
  1220. */
  1221. if (bp->b_ops) {
  1222. bp->b_ops->verify_write(bp);
  1223. if (bp->b_error) {
  1224. xfs_force_shutdown(bp->b_target->bt_mount,
  1225. SHUTDOWN_CORRUPT_INCORE);
  1226. return;
  1227. }
  1228. } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
  1229. struct xfs_mount *mp = bp->b_target->bt_mount;
  1230. /*
  1231. * non-crc filesystems don't attach verifiers during
  1232. * log recovery, so don't warn for such filesystems.
  1233. */
  1234. if (xfs_sb_version_hascrc(&mp->m_sb)) {
  1235. xfs_warn(mp,
  1236. "%s: no buf ops on daddr 0x%llx len %d",
  1237. __func__, bp->b_bn, bp->b_length);
  1238. xfs_hex_dump(bp->b_addr,
  1239. XFS_CORRUPTION_DUMP_LEN);
  1240. dump_stack();
  1241. }
  1242. }
  1243. } else if (bp->b_flags & XBF_READ_AHEAD) {
  1244. op = REQ_OP_READ;
  1245. op_flags = REQ_RAHEAD;
  1246. } else {
  1247. op = REQ_OP_READ;
  1248. }
  1249. /* we only use the buffer cache for meta-data */
  1250. op_flags |= REQ_META;
  1251. /*
  1252. * Walk all the vectors issuing IO on them. Set up the initial offset
  1253. * into the buffer and the desired IO size before we start -
  1254. * xfs_buf_ioapply_map() will modify them appropriately for each
  1255. * subsequent call.
  1256. */
  1257. offset = bp->b_offset;
  1258. size = BBTOB(bp->b_io_length);
  1259. blk_start_plug(&plug);
  1260. for (i = 0; i < bp->b_map_count; i++) {
  1261. xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
  1262. if (bp->b_error)
  1263. break;
  1264. if (size <= 0)
  1265. break; /* all done */
  1266. }
  1267. blk_finish_plug(&plug);
  1268. }
  1269. /*
  1270. * Asynchronous IO submission path. This transfers the buffer lock ownership and
  1271. * the current reference to the IO. It is not safe to reference the buffer after
  1272. * a call to this function unless the caller holds an additional reference
  1273. * itself.
  1274. */
  1275. void
  1276. xfs_buf_submit(
  1277. struct xfs_buf *bp)
  1278. {
  1279. trace_xfs_buf_submit(bp, _RET_IP_);
  1280. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  1281. ASSERT(bp->b_flags & XBF_ASYNC);
  1282. /* on shutdown we stale and complete the buffer immediately */
  1283. if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
  1284. xfs_buf_ioerror(bp, -EIO);
  1285. bp->b_flags &= ~XBF_DONE;
  1286. xfs_buf_stale(bp);
  1287. xfs_buf_ioend(bp);
  1288. return;
  1289. }
  1290. if (bp->b_flags & XBF_WRITE)
  1291. xfs_buf_wait_unpin(bp);
  1292. /* clear the internal error state to avoid spurious errors */
  1293. bp->b_io_error = 0;
  1294. /*
  1295. * The caller's reference is released during I/O completion.
  1296. * This occurs some time after the last b_io_remaining reference is
  1297. * released, so after we drop our IO reference we have to have some
  1298. * other reference to ensure the buffer doesn't go away from underneath
  1299. * us. Take a direct reference to ensure we have safe access to the
  1300. * buffer until we are finished with it.
  1301. */
  1302. xfs_buf_hold(bp);
  1303. /*
  1304. * Set the count to 1 initially, this will stop an I/O completion
  1305. * callout which happens before we have started all the I/O from calling
  1306. * xfs_buf_ioend too early.
  1307. */
  1308. atomic_set(&bp->b_io_remaining, 1);
  1309. xfs_buf_ioacct_inc(bp);
  1310. _xfs_buf_ioapply(bp);
  1311. /*
  1312. * If _xfs_buf_ioapply failed, we can get back here with only the IO
  1313. * reference we took above. If we drop it to zero, run completion so
  1314. * that we don't return to the caller with completion still pending.
  1315. */
  1316. if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
  1317. if (bp->b_error)
  1318. xfs_buf_ioend(bp);
  1319. else
  1320. xfs_buf_ioend_async(bp);
  1321. }
  1322. xfs_buf_rele(bp);
  1323. /* Note: it is not safe to reference bp now we've dropped our ref */
  1324. }
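/*
 * Example (sketch only): asynchronous submission hands the buffer lock and
 * the caller's reference over to the I/O, so take an extra hold first if
 * the buffer must still be referenced afterwards:
 *
 *	xfs_buf_hold(bp);
 *	bp->b_flags |= XBF_ASYNC;
 *	xfs_buf_submit(bp);
 *	... bp remains valid here only because of the extra hold ...
 *	xfs_buf_rele(bp);
 */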
  1325. /*
  1326. * Synchronous buffer IO submission path, read or write.
  1327. */
  1328. int
  1329. xfs_buf_submit_wait(
  1330. struct xfs_buf *bp)
  1331. {
  1332. int error;
  1333. trace_xfs_buf_submit_wait(bp, _RET_IP_);
  1334. ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
  1335. if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
  1336. xfs_buf_ioerror(bp, -EIO);
  1337. xfs_buf_stale(bp);
  1338. bp->b_flags &= ~XBF_DONE;
  1339. return -EIO;
  1340. }
  1341. if (bp->b_flags & XBF_WRITE)
  1342. xfs_buf_wait_unpin(bp);
  1343. /* clear the internal error state to avoid spurious errors */
  1344. bp->b_io_error = 0;
  1345. /*
  1346. * For synchronous IO, the IO does not inherit the submitter's reference
  1347. * count, nor the buffer lock. Hence we cannot release the reference we
  1348. * are about to take until we've waited for all IO completion to occur,
  1349. * including any xfs_buf_ioend_async() work that may be pending.
  1350. */
  1351. xfs_buf_hold(bp);
  1352. /*
  1353. * Set the count to 1 initially, this will stop an I/O completion
  1354. * callout which happens before we have started all the I/O from calling
  1355. * xfs_buf_ioend too early.
  1356. */
  1357. atomic_set(&bp->b_io_remaining, 1);
  1358. _xfs_buf_ioapply(bp);
  1359. /*
  1360. * make sure we run completion synchronously if it raced with us and is
  1361. * already complete.
  1362. */
  1363. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  1364. xfs_buf_ioend(bp);
  1365. /* wait for completion before gathering the error from the buffer */
  1366. trace_xfs_buf_iowait(bp, _RET_IP_);
  1367. wait_for_completion(&bp->b_iowait);
  1368. trace_xfs_buf_iowait_done(bp, _RET_IP_);
  1369. error = bp->b_error;
  1370. /*
  1371. * all done now, we can release the hold that keeps the buffer
  1372. * referenced for the entire IO.
  1373. */
  1374. xfs_buf_rele(bp);
  1375. return error;
  1376. }
  1377. void *
  1378. xfs_buf_offset(
  1379. struct xfs_buf *bp,
  1380. size_t offset)
  1381. {
  1382. struct page *page;
  1383. if (bp->b_addr)
  1384. return bp->b_addr + offset;
  1385. offset += bp->b_offset;
  1386. page = bp->b_pages[offset >> PAGE_SHIFT];
  1387. return page_address(page) + (offset & (PAGE_SIZE-1));
  1388. }
  1389. /*
  1390. * Move data into or out of a buffer.
  1391. */
  1392. void
  1393. xfs_buf_iomove(
  1394. xfs_buf_t *bp, /* buffer to process */
  1395. size_t boff, /* starting buffer offset */
  1396. size_t bsize, /* length to copy */
  1397. void *data, /* data address */
  1398. xfs_buf_rw_t mode) /* read/write/zero flag */
  1399. {
  1400. size_t bend;
  1401. bend = boff + bsize;
  1402. while (boff < bend) {
  1403. struct page *page;
  1404. int page_index, page_offset, csize;
  1405. page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
  1406. page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
  1407. page = bp->b_pages[page_index];
  1408. csize = min_t(size_t, PAGE_SIZE - page_offset,
  1409. BBTOB(bp->b_io_length) - boff);
  1410. ASSERT((csize + page_offset) <= PAGE_SIZE);
  1411. switch (mode) {
  1412. case XBRW_ZERO:
  1413. memset(page_address(page) + page_offset, 0, csize);
  1414. break;
  1415. case XBRW_READ:
  1416. memcpy(data, page_address(page) + page_offset, csize);
  1417. break;
  1418. case XBRW_WRITE:
  1419. memcpy(page_address(page) + page_offset, data, csize);
  1420. }
  1421. boff += csize;
  1422. data += csize;
  1423. }
  1424. }
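/*
 * Example usage (sketch only): xfs_buf_iomove() works page by page, so it
 * can be used even on unmapped buffers. Zero a sub-range, or copy caller
 * data into the buffer:
 *
 *	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, len, src, XBRW_WRITE);
 */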
  1425. /*
  1426. * Handling of buffer targets (buftargs).
  1427. */
  1428. /*
  1429. * Wait for any bufs with callbacks that have been submitted but have not yet
  1430. * returned. These buffers will have an elevated hold count, so wait on those
  1431. * while freeing all the buffers only held by the LRU.
  1432. */
  1433. static enum lru_status
  1434. xfs_buftarg_wait_rele(
  1435. struct list_head *item,
  1436. struct list_lru_one *lru,
  1437. spinlock_t *lru_lock,
  1438. void *arg)
  1439. {
  1440. struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
  1441. struct list_head *dispose = arg;
  1442. if (atomic_read(&bp->b_hold) > 1) {
  1443. /* need to wait, so skip it this pass */
  1444. trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
  1445. return LRU_SKIP;
  1446. }
  1447. if (!spin_trylock(&bp->b_lock))
  1448. return LRU_SKIP;
  1449. /*
  1450. * clear the LRU reference count so the buffer doesn't get
  1451. * ignored in xfs_buf_rele().
  1452. */
  1453. atomic_set(&bp->b_lru_ref, 0);
  1454. bp->b_state |= XFS_BSTATE_DISPOSE;
  1455. list_lru_isolate_move(lru, item, dispose);
  1456. spin_unlock(&bp->b_lock);
  1457. return LRU_REMOVED;
  1458. }
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int			loop = 0;

	/*
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
					(long long)bp->b_bn);
				xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
}
static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}
static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_shrink_count(&btp->bt_lru, sc);
}
void
xfs_free_buftarg(
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);

	xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}
int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
		return -EINVAL;
	}

	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}
/*
 * When allocating the initial buffer target we have not yet read in the
 * superblock, so we don't know what size sectors are being used at this
 * early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}
xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	struct dax_device	*dax_dev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_daxdev = dax_dev;

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error_free;

	if (list_lru_init(&btp->bt_lru))
		goto error_free;

	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto error_lru;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	if (register_shrinker(&btp->bt_shrinker))
		goto error_pcpu;
	return btp;

error_pcpu:
	percpu_counter_destroy(&btp->bt_io_count);
error_lru:
	list_lru_destroy(&btp->bt_lru);
error_free:
	kmem_free(btp);
	return NULL;
}
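/*
 * Illustrative sketch, not part of the original source: mount-time code is
 * expected to pair xfs_alloc_buftarg() with xfs_free_buftarg() once the
 * target is no longer needed.  The variable names below are assumptions for
 * illustration only.
 *
 *	struct xfs_buftarg	*targ;
 *
 *	targ = xfs_alloc_buftarg(mp, bdev, dax_dev);
 *	if (!targ)
 *		return -ENOMEM;
 *
 *	... use the buffer target for metadata I/O ...
 *
 *	xfs_free_buftarg(targ);		(at unmount or on a later error path)
 */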
/*
 * Cancel a delayed write list.
 *
 * Remove each buffer from the list, clear the delwri queue flag and drop the
 * associated buffer reference.
 */
void
xfs_buf_delwri_cancel(
	struct list_head	*list)
{
	struct xfs_buf		*bp;

	while (!list_empty(list)) {
		bp = list_first_entry(list, struct xfs_buf, b_list);

		xfs_buf_lock(bp);
		bp->b_flags &= ~_XBF_DELWRI_Q;
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}
}
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it was already
 * on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
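/*
 * Illustrative sketch, not part of the original source: delwri queueing is
 * done with the buffer locked, against a caller-private list.  The list
 * name below is an assumption for illustration only.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	if (xfs_buf_delwri_queue(bp, &buffer_list)) {
 *		... the list now owns an extra reference to bp ...
 *	}
 *	xfs_buf_unlock(bp);
 */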
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
/*
 * submit buffers for write.
 *
 * When we have a large buffer list, we do not want to hold all the buffers
 * locked while we block on the request queue waiting for IO dispatch. To avoid
 * this problem, we lock and submit buffers in groups of 50, thereby minimising
 * the lock hold times for lists which may contain thousands of objects.
 *
 * To do this, we sort the buffer list before we walk the list to lock and
 * submit buffers, and we plug and unplug around each group of buffers we
 * submit.
 */
static int
xfs_buf_delwri_submit_buffers(
	struct list_head	*buffer_list,
	struct list_head	*wait_list)
{
	struct xfs_buf		*bp, *n;
	LIST_HEAD		(submit_list);
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait_list) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		trace_xfs_buf_delwri_split(bp, _RET_IP_);

		/*
		 * We do all IO submission async. This means if we need
		 * to wait for IO completion we need to take an extra
		 * reference so the buffer is still valid on the other
		 * side. We need to move the buffer onto the io_list
		 * at this point so the caller can still access it.
		 */
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
		if (wait_list) {
			xfs_buf_hold(bp);
			list_move_tail(&bp->b_list, wait_list);
		} else
			list_del_init(&bp->b_list);

		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}
/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}
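/*
 * Illustrative sketch, not part of the original source: once a thread-local
 * delwri list has been built up with xfs_buf_delwri_queue(), it is flushed
 * either synchronously or asynchronously.  The list name is an assumption
 * for illustration only.
 *
 *	int	error;
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *	if (error)
 *		... first error reported by any buffer on the list ...
 *
 * or, when completion is tracked by other means (e.g. AIL pushing):
 *
 *	xfs_buf_delwri_submit_nowait(&buffer_list);
 */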
/*
 * Push a single buffer on a delwri queue.
 *
 * The purpose of this function is to submit a single buffer of a delwri queue
 * and return with the buffer still on the original queue. The waiting delwri
 * buffer submission infrastructure guarantees transfer of the delwri queue
 * buffer reference to a temporary wait list. We reuse this infrastructure to
 * transfer the buffer back to the original queue.
 *
 * Note the buffer transitions from the queued state, to the submitted and wait
 * listed state and back to the queued state during this call. The buffer
 * locking and queue management logic between _delwri_pushbuf() and
 * _delwri_queue() guarantee that the buffer cannot be queued to another list
 * before returning.
 */
int
xfs_buf_delwri_pushbuf(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	LIST_HEAD		(submit_list);
	int			error;

	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);

	/*
	 * Isolate the buffer to a new local list so we can submit it for I/O
	 * independently from the rest of the original list.
	 */
	xfs_buf_lock(bp);
	list_move(&bp->b_list, &submit_list);
	xfs_buf_unlock(bp);

	/*
	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
	 * the buffer on the wait list with an associated reference. Rather than
	 * bounce the buffer from a local wait list back to the original list
	 * after I/O completion, reuse the original list as the wait list.
	 */
	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);

	/*
	 * The buffer is now under I/O and wait listed as during typical delwri
	 * submission. Lock the buffer to wait for I/O completion. Rather than
	 * remove the buffer from the wait list and release the reference, we
	 * want to return with the buffer queued to the original list. The
	 * buffer already sits on the original list with a wait list reference,
	 * however. If we let the queue inherit that wait list reference, all we
	 * need to do is reset the DELWRI_Q flag.
	 */
	xfs_buf_lock(bp);
	error = bp->b_error;
	bp->b_flags |= _XBF_DELWRI_Q;
	xfs_buf_unlock(bp);

	return error;
}
int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	return 0;

 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_zone_destroy(xfs_buf_zone);
}
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	/*
	 * Set the lru reference count to 0 based on the error injection tag.
	 * This allows userspace to disrupt buffer caching for debug/testing
	 * purposes.
	 */
	if (XFS_TEST_ERROR(false, bp->b_target->bt_mount,
			   XFS_ERRTAG_BUF_LRU_REF))
		lru_ref = 0;

	atomic_set(&bp->b_lru_ref, lru_ref);
}