xfs_buf.c

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include <linux/stddef.h>
  20. #include <linux/errno.h>
  21. #include <linux/gfp.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/init.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/bio.h>
  26. #include <linux/sysctl.h>
  27. #include <linux/proc_fs.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/percpu.h>
  30. #include <linux/blkdev.h>
  31. #include <linux/hash.h>
  32. #include <linux/kthread.h>
  33. #include <linux/migrate.h>
  34. #include <linux/backing-dev.h>
  35. #include <linux/freezer.h>
  36. #include "xfs_format.h"
  37. #include "xfs_log_format.h"
  38. #include "xfs_trans_resv.h"
  39. #include "xfs_sb.h"
  40. #include "xfs_mount.h"
  41. #include "xfs_trace.h"
  42. #include "xfs_log.h"
  43. static kmem_zone_t *xfs_buf_zone;
  44. #ifdef XFS_BUF_LOCK_TRACKING
  45. # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
  46. # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
  47. # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
  48. #else
  49. # define XB_SET_OWNER(bp) do { } while (0)
  50. # define XB_CLEAR_OWNER(bp) do { } while (0)
  51. # define XB_GET_OWNER(bp) do { } while (0)
  52. #endif
  53. #define xb_to_gfp(flags) \
  54. ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  55. static inline int
  56. xfs_buf_is_vmapped(
  57. struct xfs_buf *bp)
  58. {
  59. /*
  60. * Return true if the buffer is vmapped.
  61. *
  62. * b_addr is null if the buffer is not mapped, but the code is clever
  63. * enough to know it doesn't have to map a single page, so the check has
  64. * to be both for b_addr and bp->b_page_count > 1.
  65. */
  66. return bp->b_addr && bp->b_page_count > 1;
  67. }
  68. static inline int
  69. xfs_buf_vmap_len(
  70. struct xfs_buf *bp)
  71. {
  72. return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  73. }
  74. /*
  75. * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  76. * b_lru_ref count so that the buffer is freed immediately when the buffer
  77. * reference count falls to zero. If the buffer is already on the LRU, we need
  78. * to remove the reference that LRU holds on the buffer.
  79. *
  80. * This prevents build-up of stale buffers on the LRU.
  81. */
  82. void
  83. xfs_buf_stale(
  84. struct xfs_buf *bp)
  85. {
  86. ASSERT(xfs_buf_islocked(bp));
  87. bp->b_flags |= XBF_STALE;
  88. /*
  89. * Clear the delwri status so that a delwri queue walker will not
  90. * flush this buffer to disk now that it is stale. The delwri queue has
  91. * a reference to the buffer, so this is safe to do.
  92. */
  93. bp->b_flags &= ~_XBF_DELWRI_Q;
  94. spin_lock(&bp->b_lock);
  95. atomic_set(&bp->b_lru_ref, 0);
  96. if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
  97. (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
  98. atomic_dec(&bp->b_hold);
  99. ASSERT(atomic_read(&bp->b_hold) >= 1);
  100. spin_unlock(&bp->b_lock);
  101. }
  102. static int
  103. xfs_buf_get_maps(
  104. struct xfs_buf *bp,
  105. int map_count)
  106. {
  107. ASSERT(bp->b_maps == NULL);
  108. bp->b_map_count = map_count;
  109. if (map_count == 1) {
  110. bp->b_maps = &bp->__b_map;
  111. return 0;
  112. }
  113. bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
  114. KM_NOFS);
  115. if (!bp->b_maps)
  116. return -ENOMEM;
  117. return 0;
  118. }
  119. /*
  120. * Frees b_pages if it was allocated.
  121. */
  122. static void
  123. xfs_buf_free_maps(
  124. struct xfs_buf *bp)
  125. {
  126. if (bp->b_maps != &bp->__b_map) {
  127. kmem_free(bp->b_maps);
  128. bp->b_maps = NULL;
  129. }
  130. }
  131. struct xfs_buf *
  132. _xfs_buf_alloc(
  133. struct xfs_buftarg *target,
  134. struct xfs_buf_map *map,
  135. int nmaps,
  136. xfs_buf_flags_t flags)
  137. {
  138. struct xfs_buf *bp;
  139. int error;
  140. int i;
  141. bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
  142. if (unlikely(!bp))
  143. return NULL;
  144. /*
  145. * We don't want certain flags to appear in b_flags unless they are
  146. * specifically set by later operations on the buffer.
  147. */
  148. flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
  149. atomic_set(&bp->b_hold, 1);
  150. atomic_set(&bp->b_lru_ref, 1);
  151. init_completion(&bp->b_iowait);
  152. INIT_LIST_HEAD(&bp->b_lru);
  153. INIT_LIST_HEAD(&bp->b_list);
  154. RB_CLEAR_NODE(&bp->b_rbnode);
  155. sema_init(&bp->b_sema, 0); /* held, no waiters */
  156. spin_lock_init(&bp->b_lock);
  157. XB_SET_OWNER(bp);
  158. bp->b_target = target;
  159. bp->b_flags = flags;
  160. /*
  161. * Set length and io_length to the same value initially.
  162. * I/O routines should use io_length, which will be the same in
  163. * most cases but may be reset (e.g. XFS recovery).
  164. */
  165. error = xfs_buf_get_maps(bp, nmaps);
  166. if (error) {
  167. kmem_zone_free(xfs_buf_zone, bp);
  168. return NULL;
  169. }
  170. bp->b_bn = map[0].bm_bn;
  171. bp->b_length = 0;
  172. for (i = 0; i < nmaps; i++) {
  173. bp->b_maps[i].bm_bn = map[i].bm_bn;
  174. bp->b_maps[i].bm_len = map[i].bm_len;
  175. bp->b_length += map[i].bm_len;
  176. }
  177. bp->b_io_length = bp->b_length;
  178. atomic_set(&bp->b_pin_count, 0);
  179. init_waitqueue_head(&bp->b_waiters);
  180. XFS_STATS_INC(target->bt_mount, xb_create);
  181. trace_xfs_buf_init(bp, _RET_IP_);
  182. return bp;
  183. }
  184. /*
  185. * Allocate a page array capable of holding a specified number
  186. * of pages, and point the page buf at it.
  187. */
  188. STATIC int
  189. _xfs_buf_get_pages(
  190. xfs_buf_t *bp,
  191. int page_count)
  192. {
  193. /* Make sure that we have a page list */
  194. if (bp->b_pages == NULL) {
  195. bp->b_page_count = page_count;
  196. if (page_count <= XB_PAGES) {
  197. bp->b_pages = bp->b_page_array;
  198. } else {
  199. bp->b_pages = kmem_alloc(sizeof(struct page *) *
  200. page_count, KM_NOFS);
  201. if (bp->b_pages == NULL)
  202. return -ENOMEM;
  203. }
  204. memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
  205. }
  206. return 0;
  207. }
  208. /*
  209. * Frees b_pages if it was allocated.
  210. */
  211. STATIC void
  212. _xfs_buf_free_pages(
  213. xfs_buf_t *bp)
  214. {
  215. if (bp->b_pages != bp->b_page_array) {
  216. kmem_free(bp->b_pages);
  217. bp->b_pages = NULL;
  218. }
  219. }
  220. /*
  221. * Releases the specified buffer.
  222. *
  223. * The modification state of any associated pages is left unchanged.
  224. * The buffer must not be on any hash - use xfs_buf_rele instead for
  225. * hashed and refcounted buffers
  226. */
  227. void
  228. xfs_buf_free(
  229. xfs_buf_t *bp)
  230. {
  231. trace_xfs_buf_free(bp, _RET_IP_);
  232. ASSERT(list_empty(&bp->b_lru));
  233. if (bp->b_flags & _XBF_PAGES) {
  234. uint i;
  235. if (xfs_buf_is_vmapped(bp))
  236. vm_unmap_ram(bp->b_addr - bp->b_offset,
  237. bp->b_page_count);
  238. for (i = 0; i < bp->b_page_count; i++) {
  239. struct page *page = bp->b_pages[i];
  240. __free_page(page);
  241. }
  242. } else if (bp->b_flags & _XBF_KMEM)
  243. kmem_free(bp->b_addr);
  244. _xfs_buf_free_pages(bp);
  245. xfs_buf_free_maps(bp);
  246. kmem_zone_free(xfs_buf_zone, bp);
  247. }
  248. /*
  249. * Allocates all the pages for the buffer in question and builds its page list.
  250. */
  251. STATIC int
  252. xfs_buf_allocate_memory(
  253. xfs_buf_t *bp,
  254. uint flags)
  255. {
  256. size_t size;
  257. size_t nbytes, offset;
  258. gfp_t gfp_mask = xb_to_gfp(flags);
  259. unsigned short page_count, i;
  260. xfs_off_t start, end;
  261. int error;
  262. /*
  263. * for buffers that are contained within a single page, just allocate
  264. * the memory from the heap - there's no need for the complexity of
  265. * page arrays to keep allocation down to order 0.
  266. */
  267. size = BBTOB(bp->b_length);
  268. if (size < PAGE_SIZE) {
  269. bp->b_addr = kmem_alloc(size, KM_NOFS);
  270. if (!bp->b_addr) {
  271. /* low memory - use alloc_page loop instead */
  272. goto use_alloc_page;
  273. }
  274. if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
  275. ((unsigned long)bp->b_addr & PAGE_MASK)) {
  276. /* b_addr spans two pages - use alloc_page instead */
  277. kmem_free(bp->b_addr);
  278. bp->b_addr = NULL;
  279. goto use_alloc_page;
  280. }
  281. bp->b_offset = offset_in_page(bp->b_addr);
  282. bp->b_pages = bp->b_page_array;
  283. bp->b_pages[0] = virt_to_page(bp->b_addr);
  284. bp->b_page_count = 1;
  285. bp->b_flags |= _XBF_KMEM;
  286. return 0;
  287. }
  288. use_alloc_page:
  289. start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
  290. end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
  291. >> PAGE_SHIFT;
  292. page_count = end - start;
  293. error = _xfs_buf_get_pages(bp, page_count);
  294. if (unlikely(error))
  295. return error;
  296. offset = bp->b_offset;
  297. bp->b_flags |= _XBF_PAGES;
  298. for (i = 0; i < bp->b_page_count; i++) {
  299. struct page *page;
  300. uint retries = 0;
  301. retry:
  302. page = alloc_page(gfp_mask);
  303. if (unlikely(page == NULL)) {
  304. if (flags & XBF_READ_AHEAD) {
  305. bp->b_page_count = i;
  306. error = -ENOMEM;
  307. goto out_free_pages;
  308. }
  309. /*
  310. * This could deadlock.
  311. *
  312. * But until all the XFS lowlevel code is revamped to
  313. * handle buffer allocation failures we can't do much.
  314. */
  315. if (!(++retries % 100))
  316. xfs_err(NULL,
  317. "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
  318. current->comm, current->pid,
  319. __func__, gfp_mask);
  320. XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
  321. congestion_wait(BLK_RW_ASYNC, HZ/50);
  322. goto retry;
  323. }
  324. XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
  325. nbytes = min_t(size_t, size, PAGE_SIZE - offset);
  326. size -= nbytes;
  327. bp->b_pages[i] = page;
  328. offset = 0;
  329. }
  330. return 0;
  331. out_free_pages:
  332. for (i = 0; i < bp->b_page_count; i++)
  333. __free_page(bp->b_pages[i]);
  334. return error;
  335. }
  336. /*
  337. * Map buffer into kernel address-space if necessary.
  338. */
  339. STATIC int
  340. _xfs_buf_map_pages(
  341. xfs_buf_t *bp,
  342. uint flags)
  343. {
  344. ASSERT(bp->b_flags & _XBF_PAGES);
  345. if (bp->b_page_count == 1) {
  346. /* A single page buffer is always mappable */
  347. bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
  348. } else if (flags & XBF_UNMAPPED) {
  349. bp->b_addr = NULL;
  350. } else {
  351. int retried = 0;
  352. unsigned noio_flag;
  353. /*
  354. * vm_map_ram() will allocate auxiliary structures (e.g.
  355. * pagetables) with GFP_KERNEL, yet we are likely to be under
  356. * GFP_NOFS context here. Hence we need to tell memory reclaim
  357. * that we are in such a context via PF_MEMALLOC_NOIO to prevent
  358. * memory reclaim re-entering the filesystem here and
  359. * potentially deadlocking.
  360. */
  361. noio_flag = memalloc_noio_save();
  362. do {
  363. bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
  364. -1, PAGE_KERNEL);
  365. if (bp->b_addr)
  366. break;
  367. vm_unmap_aliases();
  368. } while (retried++ <= 1);
  369. memalloc_noio_restore(noio_flag);
  370. if (!bp->b_addr)
  371. return -ENOMEM;
  372. bp->b_addr += bp->b_offset;
  373. }
  374. return 0;
  375. }
  376. /*
  377. * Finding and Reading Buffers
  378. */
  379. /*
  380. * Look up a lockable buffer for a given disk address range, inserting the
  381. * supplied new buffer into the cache if no match is found. The buffer is
  382. * returned locked. No I/O is implied by this call.
  383. */
  384. xfs_buf_t *
  385. _xfs_buf_find(
  386. struct xfs_buftarg *btp,
  387. struct xfs_buf_map *map,
  388. int nmaps,
  389. xfs_buf_flags_t flags,
  390. xfs_buf_t *new_bp)
  391. {
  392. struct xfs_perag *pag;
  393. struct rb_node **rbp;
  394. struct rb_node *parent;
  395. xfs_buf_t *bp;
  396. xfs_daddr_t blkno = map[0].bm_bn;
  397. xfs_daddr_t eofs;
  398. int numblks = 0;
  399. int i;
  400. for (i = 0; i < nmaps; i++)
  401. numblks += map[i].bm_len;
  402. /* Check for IOs smaller than the sector size / not sector aligned */
  403. ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
  404. ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
  405. /*
  406. * Corrupted block numbers can get through to here, unfortunately, so we
  407. * have to check that the buffer falls within the filesystem bounds.
  408. */
  409. eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
  410. if (blkno < 0 || blkno >= eofs) {
  411. /*
  412. * XXX (dgc): we should really be returning -EFSCORRUPTED here,
  413. * but none of the higher level infrastructure supports
  414. * returning a specific error on buffer lookup failures.
  415. */
  416. xfs_alert(btp->bt_mount,
  417. "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
  418. __func__, blkno, eofs);
  419. WARN_ON(1);
  420. return NULL;
  421. }
  422. /* get tree root */
  423. pag = xfs_perag_get(btp->bt_mount,
  424. xfs_daddr_to_agno(btp->bt_mount, blkno));
  425. /* walk tree */
  426. spin_lock(&pag->pag_buf_lock);
  427. rbp = &pag->pag_buf_tree.rb_node;
  428. parent = NULL;
  429. bp = NULL;
  430. while (*rbp) {
  431. parent = *rbp;
  432. bp = rb_entry(parent, struct xfs_buf, b_rbnode);
  433. if (blkno < bp->b_bn)
  434. rbp = &(*rbp)->rb_left;
  435. else if (blkno > bp->b_bn)
  436. rbp = &(*rbp)->rb_right;
  437. else {
  438. /*
  439. * found a block number match. If the range doesn't
  440. * match, the only way this is allowed is if the buffer
  441. * in the cache is stale and the transaction that made
  442. * it stale has not yet committed. i.e. we are
  443. * reallocating a busy extent. Skip this buffer and
  444. * continue searching to the right for an exact match.
  445. */
  446. if (bp->b_length != numblks) {
  447. ASSERT(bp->b_flags & XBF_STALE);
  448. rbp = &(*rbp)->rb_right;
  449. continue;
  450. }
  451. atomic_inc(&bp->b_hold);
  452. goto found;
  453. }
  454. }
  455. /* No match found */
  456. if (new_bp) {
  457. rb_link_node(&new_bp->b_rbnode, parent, rbp);
  458. rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
  459. /* the buffer keeps the perag reference until it is freed */
  460. new_bp->b_pag = pag;
  461. spin_unlock(&pag->pag_buf_lock);
  462. } else {
  463. XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
  464. spin_unlock(&pag->pag_buf_lock);
  465. xfs_perag_put(pag);
  466. }
  467. return new_bp;
  468. found:
  469. spin_unlock(&pag->pag_buf_lock);
  470. xfs_perag_put(pag);
  471. if (!xfs_buf_trylock(bp)) {
  472. if (flags & XBF_TRYLOCK) {
  473. xfs_buf_rele(bp);
  474. XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
  475. return NULL;
  476. }
  477. xfs_buf_lock(bp);
  478. XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
  479. }
  480. /*
  481. * if the buffer is stale, clear all the external state associated with
  482. * it. We need to keep flags such as how we allocated the buffer memory
  483. * intact here.
  484. */
  485. if (bp->b_flags & XBF_STALE) {
  486. ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
  487. ASSERT(bp->b_iodone == NULL);
  488. bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
  489. bp->b_ops = NULL;
  490. }
  491. trace_xfs_buf_find(bp, flags, _RET_IP_);
  492. XFS_STATS_INC(btp->bt_mount, xb_get_locked);
  493. return bp;
  494. }
  495. /*
  496. * Assembles a buffer covering the specified range. The code is optimised for
  497. * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  498. * more hits than misses.
  499. */
  500. struct xfs_buf *
  501. xfs_buf_get_map(
  502. struct xfs_buftarg *target,
  503. struct xfs_buf_map *map,
  504. int nmaps,
  505. xfs_buf_flags_t flags)
  506. {
  507. struct xfs_buf *bp;
  508. struct xfs_buf *new_bp;
  509. int error = 0;
  510. bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
  511. if (likely(bp))
  512. goto found;
  513. new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
  514. if (unlikely(!new_bp))
  515. return NULL;
  516. error = xfs_buf_allocate_memory(new_bp, flags);
  517. if (error) {
  518. xfs_buf_free(new_bp);
  519. return NULL;
  520. }
  521. bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
  522. if (!bp) {
  523. xfs_buf_free(new_bp);
  524. return NULL;
  525. }
  526. if (bp != new_bp)
  527. xfs_buf_free(new_bp);
  528. found:
  529. if (!bp->b_addr) {
  530. error = _xfs_buf_map_pages(bp, flags);
  531. if (unlikely(error)) {
  532. xfs_warn(target->bt_mount,
  533. "%s: failed to map pagesn", __func__);
  534. xfs_buf_relse(bp);
  535. return NULL;
  536. }
  537. }
  538. XFS_STATS_INC(target->bt_mount, xb_get);
  539. trace_xfs_buf_get(bp, flags, _RET_IP_);
  540. return bp;
  541. }
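/*
 * Illustrative usage sketch, not called from this file: a cached lookup
 * through the single-map convenience macro. The target, block number and
 * length are placeholders. The returned buffer is locked and referenced,
 * and no I/O has been issued for it.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get_map(btp, &map, 1, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	memset(bp->b_addr, 0, BBTOB(bp->b_length));	// mapped, so b_addr is valid
 *	xfs_buf_relse(bp);	// unlock and drop our reference
 */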
  542. STATIC int
  543. _xfs_buf_read(
  544. xfs_buf_t *bp,
  545. xfs_buf_flags_t flags)
  546. {
  547. ASSERT(!(flags & XBF_WRITE));
  548. ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
  549. bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
  550. bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
  551. if (flags & XBF_ASYNC) {
  552. xfs_buf_submit(bp);
  553. return 0;
  554. }
  555. return xfs_buf_submit_wait(bp);
  556. }
  557. xfs_buf_t *
  558. xfs_buf_read_map(
  559. struct xfs_buftarg *target,
  560. struct xfs_buf_map *map,
  561. int nmaps,
  562. xfs_buf_flags_t flags,
  563. const struct xfs_buf_ops *ops)
  564. {
  565. struct xfs_buf *bp;
  566. flags |= XBF_READ;
  567. bp = xfs_buf_get_map(target, map, nmaps, flags);
  568. if (bp) {
  569. trace_xfs_buf_read(bp, flags, _RET_IP_);
  570. if (!XFS_BUF_ISDONE(bp)) {
  571. XFS_STATS_INC(target->bt_mount, xb_get_read);
  572. bp->b_ops = ops;
  573. _xfs_buf_read(bp, flags);
  574. } else if (flags & XBF_ASYNC) {
  575. /*
  576. * Read ahead call which is already satisfied,
  577. * drop the buffer
  578. */
  579. xfs_buf_relse(bp);
  580. return NULL;
  581. } else {
  582. /* We do not want read in the flags */
  583. bp->b_flags &= ~XBF_READ;
  584. }
  585. }
  586. return bp;
  587. }
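/*
 * Illustrative sketch of a cached, verified read (placeholder target, block
 * number and length; xfs_sb_buf_ops stands in for whatever verifier matches
 * the block being read). On a cache hit no I/O is issued; on a miss the read
 * is synchronous and any failure is reported through b_error.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, &xfs_sb_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */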
  588. /*
  589. * If we are not low on memory then do the readahead in a deadlock
  590. * safe manner.
  591. */
  592. void
  593. xfs_buf_readahead_map(
  594. struct xfs_buftarg *target,
  595. struct xfs_buf_map *map,
  596. int nmaps,
  597. const struct xfs_buf_ops *ops)
  598. {
  599. if (bdi_read_congested(target->bt_bdi))
  600. return;
  601. xfs_buf_read_map(target, map, nmaps,
  602. XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
  603. }
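/*
 * Illustrative sketch (placeholders throughout): start readahead for a block
 * we expect to need shortly, then do the real blocking read later. If the
 * readahead has completed by then, the second call is a cache hit and issues
 * no further I/O.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *
 *	xfs_buf_readahead_map(btp, &map, 1, ops);
 *	... other work ...
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, ops);
 */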
  604. /*
  605. * Read an uncached buffer from disk. Allocates and returns a locked
  606. * buffer containing the disk contents or nothing.
  607. */
  608. int
  609. xfs_buf_read_uncached(
  610. struct xfs_buftarg *target,
  611. xfs_daddr_t daddr,
  612. size_t numblks,
  613. int flags,
  614. struct xfs_buf **bpp,
  615. const struct xfs_buf_ops *ops)
  616. {
  617. struct xfs_buf *bp;
  618. *bpp = NULL;
  619. bp = xfs_buf_get_uncached(target, numblks, flags);
  620. if (!bp)
  621. return -ENOMEM;
  622. /* set up the buffer for a read IO */
  623. ASSERT(bp->b_map_count == 1);
  624. bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
  625. bp->b_maps[0].bm_bn = daddr;
  626. bp->b_flags |= XBF_READ;
  627. bp->b_ops = ops;
  628. xfs_buf_submit_wait(bp);
  629. if (bp->b_error) {
  630. int error = bp->b_error;
  631. xfs_buf_relse(bp);
  632. return error;
  633. }
  634. *bpp = bp;
  635. return 0;
  636. }
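/*
 * Illustrative sketch (placeholder target, daddr and length): read a range
 * that is deliberately kept out of the buffer cache, e.g. when probing
 * on-disk structures before the cache can be used. A NULL verifier is valid
 * when no verification is wanted.
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(btp, daddr, numblks, 0, &bp, NULL);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr ...
 *	xfs_buf_relse(bp);
 */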
  637. /*
  638. * Return a buffer allocated as an empty buffer and associated to external
  639. * memory via xfs_buf_associate_memory() back to its empty state.
  640. */
  641. void
  642. xfs_buf_set_empty(
  643. struct xfs_buf *bp,
  644. size_t numblks)
  645. {
  646. if (bp->b_pages)
  647. _xfs_buf_free_pages(bp);
  648. bp->b_pages = NULL;
  649. bp->b_page_count = 0;
  650. bp->b_addr = NULL;
  651. bp->b_length = numblks;
  652. bp->b_io_length = numblks;
  653. ASSERT(bp->b_map_count == 1);
  654. bp->b_bn = XFS_BUF_DADDR_NULL;
  655. bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
  656. bp->b_maps[0].bm_len = bp->b_length;
  657. }
  658. static inline struct page *
  659. mem_to_page(
  660. void *addr)
  661. {
  662. if ((!is_vmalloc_addr(addr))) {
  663. return virt_to_page(addr);
  664. } else {
  665. return vmalloc_to_page(addr);
  666. }
  667. }
  668. int
  669. xfs_buf_associate_memory(
  670. xfs_buf_t *bp,
  671. void *mem,
  672. size_t len)
  673. {
  674. int rval;
  675. int i = 0;
  676. unsigned long pageaddr;
  677. unsigned long offset;
  678. size_t buflen;
  679. int page_count;
  680. pageaddr = (unsigned long)mem & PAGE_MASK;
  681. offset = (unsigned long)mem - pageaddr;
  682. buflen = PAGE_ALIGN(len + offset);
  683. page_count = buflen >> PAGE_SHIFT;
  684. /* Free any previous set of page pointers */
  685. if (bp->b_pages)
  686. _xfs_buf_free_pages(bp);
  687. bp->b_pages = NULL;
  688. bp->b_addr = mem;
  689. rval = _xfs_buf_get_pages(bp, page_count);
  690. if (rval)
  691. return rval;
  692. bp->b_offset = offset;
  693. for (i = 0; i < bp->b_page_count; i++) {
  694. bp->b_pages[i] = mem_to_page((void *)pageaddr);
  695. pageaddr += PAGE_SIZE;
  696. }
  697. bp->b_io_length = BTOBB(len);
  698. bp->b_length = BTOBB(buflen);
  699. return 0;
  700. }
  701. xfs_buf_t *
  702. xfs_buf_get_uncached(
  703. struct xfs_buftarg *target,
  704. size_t numblks,
  705. int flags)
  706. {
  707. unsigned long page_count;
  708. int error, i;
  709. struct xfs_buf *bp;
  710. DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
  711. bp = _xfs_buf_alloc(target, &map, 1, 0);
  712. if (unlikely(bp == NULL))
  713. goto fail;
  714. page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
  715. error = _xfs_buf_get_pages(bp, page_count);
  716. if (error)
  717. goto fail_free_buf;
  718. for (i = 0; i < page_count; i++) {
  719. bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
  720. if (!bp->b_pages[i])
  721. goto fail_free_mem;
  722. }
  723. bp->b_flags |= _XBF_PAGES;
  724. error = _xfs_buf_map_pages(bp, 0);
  725. if (unlikely(error)) {
  726. xfs_warn(target->bt_mount,
  727. "%s: failed to map pages", __func__);
  728. goto fail_free_mem;
  729. }
  730. trace_xfs_buf_get_uncached(bp, _RET_IP_);
  731. return bp;
  732. fail_free_mem:
  733. while (--i >= 0)
  734. __free_page(bp->b_pages[i]);
  735. _xfs_buf_free_pages(bp);
  736. fail_free_buf:
  737. xfs_buf_free_maps(bp);
  738. kmem_zone_free(xfs_buf_zone, bp);
  739. fail:
  740. return NULL;
  741. }
  742. /*
  743. * Increment reference count on buffer, to hold the buffer concurrently
  744. * with another thread which may release (free) the buffer asynchronously.
  745. * Must hold the buffer already to call this function.
  746. */
  747. void
  748. xfs_buf_hold(
  749. xfs_buf_t *bp)
  750. {
  751. trace_xfs_buf_hold(bp, _RET_IP_);
  752. atomic_inc(&bp->b_hold);
  753. }
  754. /*
  755. * Releases a hold on the specified buffer. If the hold count is 1,
  756. * calls xfs_buf_free.
  757. */
  758. void
  759. xfs_buf_rele(
  760. xfs_buf_t *bp)
  761. {
  762. struct xfs_perag *pag = bp->b_pag;
  763. trace_xfs_buf_rele(bp, _RET_IP_);
  764. if (!pag) {
  765. ASSERT(list_empty(&bp->b_lru));
  766. ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
  767. if (atomic_dec_and_test(&bp->b_hold))
  768. xfs_buf_free(bp);
  769. return;
  770. }
  771. ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
  772. ASSERT(atomic_read(&bp->b_hold) > 0);
  773. if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
  774. spin_lock(&bp->b_lock);
  775. if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
  776. /*
  777. * If the buffer is added to the LRU take a new
  778. * reference to the buffer for the LRU and clear the
  779. * (now stale) dispose list state flag
  780. */
  781. if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
  782. bp->b_state &= ~XFS_BSTATE_DISPOSE;
  783. atomic_inc(&bp->b_hold);
  784. }
  785. spin_unlock(&bp->b_lock);
  786. spin_unlock(&pag->pag_buf_lock);
  787. } else {
  788. /*
  789. * most of the time buffers will already be removed from
  790. * the LRU, so optimise that case by checking for the
  791. * XFS_BSTATE_DISPOSE flag indicating the last list the
  792. * buffer was on was the disposal list
  793. */
  794. if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
  795. list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
  796. } else {
  797. ASSERT(list_empty(&bp->b_lru));
  798. }
  799. spin_unlock(&bp->b_lock);
  800. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  801. rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
  802. spin_unlock(&pag->pag_buf_lock);
  803. xfs_perag_put(pag);
  804. xfs_buf_free(bp);
  805. }
  806. }
  807. }
  808. /*
  809. * Lock a buffer object, if it is not already locked.
  810. *
  811. * If we come across a stale, pinned, locked buffer, we know that we are
  812. * being asked to lock a buffer that has been reallocated. Because it is
  813. * pinned, we know that the log has not been pushed to disk and hence it
  814. * will still be locked. Rather than continuing to have trylock attempts
  815. * fail until someone else pushes the log, push it ourselves before
  816. * returning. This means that the xfsaild will not get stuck trying
  817. * to push on stale inode buffers.
  818. */
  819. int
  820. xfs_buf_trylock(
  821. struct xfs_buf *bp)
  822. {
  823. int locked;
  824. locked = down_trylock(&bp->b_sema) == 0;
  825. if (locked)
  826. XB_SET_OWNER(bp);
  827. trace_xfs_buf_trylock(bp, _RET_IP_);
  828. return locked;
  829. }
  830. /*
  831. * Lock a buffer object.
  832. *
  833. * If we come across a stale, pinned, locked buffer, we know that we
  834. * are being asked to lock a buffer that has been reallocated. Because
  835. * it is pinned, we know that the log has not been pushed to disk and
  836. * hence it will still be locked. Rather than sleeping until someone
  837. * else pushes the log, push it ourselves before trying to get the lock.
  838. */
  839. void
  840. xfs_buf_lock(
  841. struct xfs_buf *bp)
  842. {
  843. trace_xfs_buf_lock(bp, _RET_IP_);
  844. if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
  845. xfs_log_force(bp->b_target->bt_mount, 0);
  846. down(&bp->b_sema);
  847. XB_SET_OWNER(bp);
  848. trace_xfs_buf_lock_done(bp, _RET_IP_);
  849. }
  850. void
  851. xfs_buf_unlock(
  852. struct xfs_buf *bp)
  853. {
  854. XB_CLEAR_OWNER(bp);
  855. up(&bp->b_sema);
  856. trace_xfs_buf_unlock(bp, _RET_IP_);
  857. }
  858. STATIC void
  859. xfs_buf_wait_unpin(
  860. xfs_buf_t *bp)
  861. {
  862. DECLARE_WAITQUEUE (wait, current);
  863. if (atomic_read(&bp->b_pin_count) == 0)
  864. return;
  865. add_wait_queue(&bp->b_waiters, &wait);
  866. for (;;) {
  867. set_current_state(TASK_UNINTERRUPTIBLE);
  868. if (atomic_read(&bp->b_pin_count) == 0)
  869. break;
  870. io_schedule();
  871. }
  872. remove_wait_queue(&bp->b_waiters, &wait);
  873. set_current_state(TASK_RUNNING);
  874. }
  875. /*
  876. * Buffer Utility Routines
  877. */
  878. void
  879. xfs_buf_ioend(
  880. struct xfs_buf *bp)
  881. {
  882. bool read = bp->b_flags & XBF_READ;
  883. trace_xfs_buf_iodone(bp, _RET_IP_);
  884. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
  885. /*
  886. * Pull in IO completion errors now. We are guaranteed to be running
  887. * single threaded, so we don't need the lock to read b_io_error.
  888. */
  889. if (!bp->b_error && bp->b_io_error)
  890. xfs_buf_ioerror(bp, bp->b_io_error);
  891. /* Only validate buffers that were read without errors */
  892. if (read && !bp->b_error && bp->b_ops) {
  893. ASSERT(!bp->b_iodone);
  894. bp->b_ops->verify_read(bp);
  895. }
  896. if (!bp->b_error)
  897. bp->b_flags |= XBF_DONE;
  898. if (bp->b_iodone)
  899. (*(bp->b_iodone))(bp);
  900. else if (bp->b_flags & XBF_ASYNC)
  901. xfs_buf_relse(bp);
  902. else
  903. complete(&bp->b_iowait);
  904. }
  905. static void
  906. xfs_buf_ioend_work(
  907. struct work_struct *work)
  908. {
  909. struct xfs_buf *bp =
  910. container_of(work, xfs_buf_t, b_ioend_work);
  911. xfs_buf_ioend(bp);
  912. }
  913. void
  914. xfs_buf_ioend_async(
  915. struct xfs_buf *bp)
  916. {
  917. INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
  918. queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
  919. }
  920. void
  921. xfs_buf_ioerror(
  922. xfs_buf_t *bp,
  923. int error)
  924. {
  925. ASSERT(error <= 0 && error >= -1000);
  926. bp->b_error = error;
  927. trace_xfs_buf_ioerror(bp, error, _RET_IP_);
  928. }
  929. void
  930. xfs_buf_ioerror_alert(
  931. struct xfs_buf *bp,
  932. const char *func)
  933. {
  934. xfs_alert(bp->b_target->bt_mount,
  935. "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
  936. (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
  937. }
  938. int
  939. xfs_bwrite(
  940. struct xfs_buf *bp)
  941. {
  942. int error;
  943. ASSERT(xfs_buf_islocked(bp));
  944. bp->b_flags |= XBF_WRITE;
  945. bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
  946. XBF_WRITE_FAIL | XBF_DONE);
  947. error = xfs_buf_submit_wait(bp);
  948. if (error) {
  949. xfs_force_shutdown(bp->b_target->bt_mount,
  950. SHUTDOWN_META_IO_ERROR);
  951. }
  952. return error;
  953. }
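/*
 * Illustrative sketch (bp is a placeholder for a buffer the caller already
 * holds a reference to): modify a cached buffer and write it synchronously.
 * xfs_bwrite() waits for completion and returns with the buffer still
 * locked, so the caller unlocks and releases it afterwards.
 *
 *	xfs_buf_lock(bp);
 *	... modify the contents through bp->b_addr ...
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 */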
  954. STATIC void
  955. xfs_buf_bio_end_io(
  956. struct bio *bio)
  957. {
  958. xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
  959. /*
  960. * don't overwrite existing errors - otherwise we can lose errors on
  961. * buffers that require multiple bios to complete.
  962. */
  963. if (bio->bi_error) {
  964. spin_lock(&bp->b_lock);
  965. if (!bp->b_io_error)
  966. bp->b_io_error = bio->bi_error;
  967. spin_unlock(&bp->b_lock);
  968. }
  969. if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
  970. invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
  971. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  972. xfs_buf_ioend_async(bp);
  973. bio_put(bio);
  974. }
  975. static void
  976. xfs_buf_ioapply_map(
  977. struct xfs_buf *bp,
  978. int map,
  979. int *buf_offset,
  980. int *count,
  981. int rw)
  982. {
  983. int page_index;
  984. int total_nr_pages = bp->b_page_count;
  985. int nr_pages;
  986. struct bio *bio;
  987. sector_t sector = bp->b_maps[map].bm_bn;
  988. int size;
  989. int offset;
  990. total_nr_pages = bp->b_page_count;
  991. /* skip the pages in the buffer before the start offset */
  992. page_index = 0;
  993. offset = *buf_offset;
  994. while (offset >= PAGE_SIZE) {
  995. page_index++;
  996. offset -= PAGE_SIZE;
  997. }
  998. /*
  999. * Limit the IO size to the length of the current vector, and update the
  1000. * remaining IO count for the next time around.
  1001. */
  1002. size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
  1003. *count -= size;
  1004. *buf_offset += size;
  1005. next_chunk:
  1006. atomic_inc(&bp->b_io_remaining);
  1007. nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
  1008. if (nr_pages > total_nr_pages)
  1009. nr_pages = total_nr_pages;
  1010. bio = bio_alloc(GFP_NOIO, nr_pages);
  1011. bio->bi_bdev = bp->b_target->bt_bdev;
  1012. bio->bi_iter.bi_sector = sector;
  1013. bio->bi_end_io = xfs_buf_bio_end_io;
  1014. bio->bi_private = bp;
  1015. for (; size && nr_pages; nr_pages--, page_index++) {
  1016. int rbytes, nbytes = PAGE_SIZE - offset;
  1017. if (nbytes > size)
  1018. nbytes = size;
  1019. rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
  1020. offset);
  1021. if (rbytes < nbytes)
  1022. break;
  1023. offset = 0;
  1024. sector += BTOBB(nbytes);
  1025. size -= nbytes;
  1026. total_nr_pages--;
  1027. }
  1028. if (likely(bio->bi_iter.bi_size)) {
  1029. if (xfs_buf_is_vmapped(bp)) {
  1030. flush_kernel_vmap_range(bp->b_addr,
  1031. xfs_buf_vmap_len(bp));
  1032. }
  1033. submit_bio(rw, bio);
  1034. if (size)
  1035. goto next_chunk;
  1036. } else {
  1037. /*
  1038. * This is guaranteed not to be the last io reference count
  1039. * because the caller (xfs_buf_submit) holds a count itself.
  1040. */
  1041. atomic_dec(&bp->b_io_remaining);
  1042. xfs_buf_ioerror(bp, -EIO);
  1043. bio_put(bio);
  1044. }
  1045. }
  1046. STATIC void
  1047. _xfs_buf_ioapply(
  1048. struct xfs_buf *bp)
  1049. {
  1050. struct blk_plug plug;
  1051. int rw;
  1052. int offset;
  1053. int size;
  1054. int i;
  1055. /*
  1056. * Make sure we capture only current IO errors rather than stale errors
  1057. * left over from previous use of the buffer (e.g. failed readahead).
  1058. */
  1059. bp->b_error = 0;
  1060. /*
  1061. * Initialize the I/O completion workqueue if we haven't yet or the
  1062. * submitter has not opted to specify a custom one.
  1063. */
  1064. if (!bp->b_ioend_wq)
  1065. bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
  1066. if (bp->b_flags & XBF_WRITE) {
  1067. if (bp->b_flags & XBF_SYNCIO)
  1068. rw = WRITE_SYNC;
  1069. else
  1070. rw = WRITE;
  1071. if (bp->b_flags & XBF_FUA)
  1072. rw |= REQ_FUA;
  1073. if (bp->b_flags & XBF_FLUSH)
  1074. rw |= REQ_FLUSH;
  1075. /*
  1076. * Run the write verifier callback function if it exists. If
  1077. * this function fails it will mark the buffer with an error and
  1078. * the IO should not be dispatched.
  1079. */
  1080. if (bp->b_ops) {
  1081. bp->b_ops->verify_write(bp);
  1082. if (bp->b_error) {
  1083. xfs_force_shutdown(bp->b_target->bt_mount,
  1084. SHUTDOWN_CORRUPT_INCORE);
  1085. return;
  1086. }
  1087. } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
  1088. struct xfs_mount *mp = bp->b_target->bt_mount;
  1089. /*
  1090. * non-crc filesystems don't attach verifiers during
  1091. * log recovery, so don't warn for such filesystems.
  1092. */
  1093. if (xfs_sb_version_hascrc(&mp->m_sb)) {
  1094. xfs_warn(mp,
  1095. "%s: no ops on block 0x%llx/0x%x",
  1096. __func__, bp->b_bn, bp->b_length);
  1097. xfs_hex_dump(bp->b_addr, 64);
  1098. dump_stack();
  1099. }
  1100. }
  1101. } else if (bp->b_flags & XBF_READ_AHEAD) {
  1102. rw = READA;
  1103. } else {
  1104. rw = READ;
  1105. }
  1106. /* we only use the buffer cache for meta-data */
  1107. rw |= REQ_META;
  1108. /*
  1109. * Walk all the vectors issuing IO on them. Set up the initial offset
  1110. * into the buffer and the desired IO size before we start -
  1111. * xfs_buf_ioapply_map() will modify them appropriately for each
  1112. * subsequent call.
  1113. */
  1114. offset = bp->b_offset;
  1115. size = BBTOB(bp->b_io_length);
  1116. blk_start_plug(&plug);
  1117. for (i = 0; i < bp->b_map_count; i++) {
  1118. xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
  1119. if (bp->b_error)
  1120. break;
  1121. if (size <= 0)
  1122. break; /* all done */
  1123. }
  1124. blk_finish_plug(&plug);
  1125. }
  1126. /*
  1127. * Asynchronous IO submission path. This transfers the buffer lock ownership and
  1128. * the current reference to the IO. It is not safe to reference the buffer after
  1129. * a call to this function unless the caller holds an additional reference
  1130. * itself.
  1131. */
  1132. void
  1133. xfs_buf_submit(
  1134. struct xfs_buf *bp)
  1135. {
  1136. trace_xfs_buf_submit(bp, _RET_IP_);
  1137. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  1138. ASSERT(bp->b_flags & XBF_ASYNC);
  1139. /* on shutdown we stale and complete the buffer immediately */
  1140. if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
  1141. xfs_buf_ioerror(bp, -EIO);
  1142. bp->b_flags &= ~XBF_DONE;
  1143. xfs_buf_stale(bp);
  1144. xfs_buf_ioend(bp);
  1145. return;
  1146. }
  1147. if (bp->b_flags & XBF_WRITE)
  1148. xfs_buf_wait_unpin(bp);
  1149. /* clear the internal error state to avoid spurious errors */
  1150. bp->b_io_error = 0;
  1151. /*
  1152. * The caller's reference is released during I/O completion.
  1153. * This occurs some time after the last b_io_remaining reference is
  1154. * released, so after we drop our IO reference we have to have some
  1155. * other reference to ensure the buffer doesn't go away from underneath
  1156. * us. Take a direct reference to ensure we have safe access to the
  1157. * buffer until we are finished with it.
  1158. */
  1159. xfs_buf_hold(bp);
  1160. /*
  1161. * Set the count to 1 initially; this will stop an I/O completion
  1162. * callout that happens before we have started all the I/O from calling
  1163. * xfs_buf_ioend too early.
  1164. */
  1165. atomic_set(&bp->b_io_remaining, 1);
  1166. _xfs_buf_ioapply(bp);
  1167. /*
  1168. * If _xfs_buf_ioapply failed, we can get back here with only the IO
  1169. * reference we took above. If we drop it to zero, run completion so
  1170. * that we don't return to the caller with completion still pending.
  1171. */
  1172. if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
  1173. if (bp->b_error)
  1174. xfs_buf_ioend(bp);
  1175. else
  1176. xfs_buf_ioend_async(bp);
  1177. }
  1178. xfs_buf_rele(bp);
  1179. /* Note: it is not safe to reference bp now we've dropped our ref */
  1180. }
  1181. /*
  1182. * Synchronous buffer IO submission path, read or write.
  1183. */
  1184. int
  1185. xfs_buf_submit_wait(
  1186. struct xfs_buf *bp)
  1187. {
  1188. int error;
  1189. trace_xfs_buf_submit_wait(bp, _RET_IP_);
  1190. ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
  1191. if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
  1192. xfs_buf_ioerror(bp, -EIO);
  1193. xfs_buf_stale(bp);
  1194. bp->b_flags &= ~XBF_DONE;
  1195. return -EIO;
  1196. }
  1197. if (bp->b_flags & XBF_WRITE)
  1198. xfs_buf_wait_unpin(bp);
  1199. /* clear the internal error state to avoid spurious errors */
  1200. bp->b_io_error = 0;
  1201. /*
  1202. * For synchronous IO, the IO does not inherit the submitter's reference
  1203. * count, nor the buffer lock. Hence we cannot release the reference we
  1204. * are about to take until we've waited for all IO completion to occur,
  1205. * including any xfs_buf_ioend_async() work that may be pending.
  1206. */
  1207. xfs_buf_hold(bp);
  1208. /*
  1209. * Set the count to 1 initially; this will stop an I/O completion
  1210. * callout that happens before we have started all the I/O from calling
  1211. * xfs_buf_ioend too early.
  1212. */
  1213. atomic_set(&bp->b_io_remaining, 1);
  1214. _xfs_buf_ioapply(bp);
  1215. /*
  1216. * make sure we run completion synchronously if it raced with us and is
  1217. * already complete.
  1218. */
  1219. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  1220. xfs_buf_ioend(bp);
  1221. /* wait for completion before gathering the error from the buffer */
  1222. trace_xfs_buf_iowait(bp, _RET_IP_);
  1223. wait_for_completion(&bp->b_iowait);
  1224. trace_xfs_buf_iowait_done(bp, _RET_IP_);
  1225. error = bp->b_error;
  1226. /*
  1227. * all done now, we can release the hold that keeps the buffer
  1228. * referenced for the entire IO.
  1229. */
  1230. xfs_buf_rele(bp);
  1231. return error;
  1232. }
  1233. void *
  1234. xfs_buf_offset(
  1235. struct xfs_buf *bp,
  1236. size_t offset)
  1237. {
  1238. struct page *page;
  1239. if (bp->b_addr)
  1240. return bp->b_addr + offset;
  1241. offset += bp->b_offset;
  1242. page = bp->b_pages[offset >> PAGE_SHIFT];
  1243. return page_address(page) + (offset & (PAGE_SIZE-1));
  1244. }
  1245. /*
  1246. * Move data into or out of a buffer.
  1247. */
  1248. void
  1249. xfs_buf_iomove(
  1250. xfs_buf_t *bp, /* buffer to process */
  1251. size_t boff, /* starting buffer offset */
  1252. size_t bsize, /* length to copy */
  1253. void *data, /* data address */
  1254. xfs_buf_rw_t mode) /* read/write/zero flag */
  1255. {
  1256. size_t bend;
  1257. bend = boff + bsize;
  1258. while (boff < bend) {
  1259. struct page *page;
  1260. int page_index, page_offset, csize;
  1261. page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
  1262. page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
  1263. page = bp->b_pages[page_index];
  1264. csize = min_t(size_t, PAGE_SIZE - page_offset,
  1265. BBTOB(bp->b_io_length) - boff);
  1266. ASSERT((csize + page_offset) <= PAGE_SIZE);
  1267. switch (mode) {
  1268. case XBRW_ZERO:
  1269. memset(page_address(page) + page_offset, 0, csize);
  1270. break;
  1271. case XBRW_READ:
  1272. memcpy(data, page_address(page) + page_offset, csize);
  1273. break;
  1274. case XBRW_WRITE:
  1275. memcpy(page_address(page) + page_offset, data, csize);
  1276. }
  1277. boff += csize;
  1278. data += csize;
  1279. }
  1280. }
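/*
 * Illustrative sketch (offsets and the record are placeholders): zero 512
 * bytes at byte offset 1024 of the buffer, then copy a caller-supplied
 * structure in, without assuming the buffer is mapped contiguously.
 *
 *	xfs_buf_iomove(bp, 1024, 512, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, sizeof(rec), &rec, XBRW_WRITE);
 */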
  1281. /*
  1282. * Handling of buffer targets (buftargs).
  1283. */
  1284. /*
  1285. * Wait for any bufs with callbacks that have been submitted but have not yet
  1286. * returned. These buffers will have an elevated hold count, so wait on those
  1287. * while freeing all the buffers only held by the LRU.
  1288. */
  1289. static enum lru_status
  1290. xfs_buftarg_wait_rele(
  1291. struct list_head *item,
  1292. struct list_lru_one *lru,
  1293. spinlock_t *lru_lock,
  1294. void *arg)
  1295. {
  1296. struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
  1297. struct list_head *dispose = arg;
  1298. if (atomic_read(&bp->b_hold) > 1) {
  1299. /* need to wait, so skip it this pass */
  1300. trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
  1301. return LRU_SKIP;
  1302. }
  1303. if (!spin_trylock(&bp->b_lock))
  1304. return LRU_SKIP;
  1305. /*
  1306. * clear the LRU reference count so the buffer doesn't get
  1307. * ignored in xfs_buf_rele().
  1308. */
  1309. atomic_set(&bp->b_lru_ref, 0);
  1310. bp->b_state |= XFS_BSTATE_DISPOSE;
  1311. list_lru_isolate_move(lru, item, dispose);
  1312. spin_unlock(&bp->b_lock);
  1313. return LRU_REMOVED;
  1314. }
  1315. void
  1316. xfs_wait_buftarg(
  1317. struct xfs_buftarg *btp)
  1318. {
  1319. LIST_HEAD(dispose);
  1320. int loop = 0;
  1321. /* loop until there is nothing left on the lru list. */
  1322. while (list_lru_count(&btp->bt_lru)) {
  1323. list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
  1324. &dispose, LONG_MAX);
  1325. while (!list_empty(&dispose)) {
  1326. struct xfs_buf *bp;
  1327. bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
  1328. list_del_init(&bp->b_lru);
  1329. if (bp->b_flags & XBF_WRITE_FAIL) {
  1330. xfs_alert(btp->bt_mount,
  1331. "Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
  1332. (long long)bp->b_bn);
  1333. xfs_alert(btp->bt_mount,
  1334. "Please run xfs_repair to determine the extent of the problem.");
  1335. }
  1336. xfs_buf_rele(bp);
  1337. }
  1338. if (loop++ != 0)
  1339. delay(100);
  1340. }
  1341. }
  1342. static enum lru_status
  1343. xfs_buftarg_isolate(
  1344. struct list_head *item,
  1345. struct list_lru_one *lru,
  1346. spinlock_t *lru_lock,
  1347. void *arg)
  1348. {
  1349. struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
  1350. struct list_head *dispose = arg;
  1351. /*
  1352. * we are inverting the lru lock/bp->b_lock here, so use a trylock.
  1353. * If we fail to get the lock, just skip it.
  1354. */
  1355. if (!spin_trylock(&bp->b_lock))
  1356. return LRU_SKIP;
  1357. /*
  1358. * Decrement the b_lru_ref count unless the value is already
  1359. * zero. If the value is already zero, we need to reclaim the
  1360. * buffer, otherwise it gets another trip through the LRU.
  1361. */
  1362. if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
  1363. spin_unlock(&bp->b_lock);
  1364. return LRU_ROTATE;
  1365. }
  1366. bp->b_state |= XFS_BSTATE_DISPOSE;
  1367. list_lru_isolate_move(lru, item, dispose);
  1368. spin_unlock(&bp->b_lock);
  1369. return LRU_REMOVED;
  1370. }
  1371. static unsigned long
  1372. xfs_buftarg_shrink_scan(
  1373. struct shrinker *shrink,
  1374. struct shrink_control *sc)
  1375. {
  1376. struct xfs_buftarg *btp = container_of(shrink,
  1377. struct xfs_buftarg, bt_shrinker);
  1378. LIST_HEAD(dispose);
  1379. unsigned long freed;
  1380. freed = list_lru_shrink_walk(&btp->bt_lru, sc,
  1381. xfs_buftarg_isolate, &dispose);
  1382. while (!list_empty(&dispose)) {
  1383. struct xfs_buf *bp;
  1384. bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
  1385. list_del_init(&bp->b_lru);
  1386. xfs_buf_rele(bp);
  1387. }
  1388. return freed;
  1389. }
  1390. static unsigned long
  1391. xfs_buftarg_shrink_count(
  1392. struct shrinker *shrink,
  1393. struct shrink_control *sc)
  1394. {
  1395. struct xfs_buftarg *btp = container_of(shrink,
  1396. struct xfs_buftarg, bt_shrinker);
  1397. return list_lru_shrink_count(&btp->bt_lru, sc);
  1398. }
  1399. void
  1400. xfs_free_buftarg(
  1401. struct xfs_mount *mp,
  1402. struct xfs_buftarg *btp)
  1403. {
  1404. unregister_shrinker(&btp->bt_shrinker);
  1405. list_lru_destroy(&btp->bt_lru);
  1406. if (mp->m_flags & XFS_MOUNT_BARRIER)
  1407. xfs_blkdev_issue_flush(btp);
  1408. kmem_free(btp);
  1409. }
  1410. int
  1411. xfs_setsize_buftarg(
  1412. xfs_buftarg_t *btp,
  1413. unsigned int sectorsize)
  1414. {
  1415. /* Set up metadata sector size info */
  1416. btp->bt_meta_sectorsize = sectorsize;
  1417. btp->bt_meta_sectormask = sectorsize - 1;
  1418. if (set_blocksize(btp->bt_bdev, sectorsize)) {
  1419. char name[BDEVNAME_SIZE];
  1420. bdevname(btp->bt_bdev, name);
  1421. xfs_warn(btp->bt_mount,
  1422. "Cannot set_blocksize to %u on device %s",
  1423. sectorsize, name);
  1424. return -EINVAL;
  1425. }
  1426. /* Set up device logical sector size mask */
  1427. btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
  1428. btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
  1429. return 0;
  1430. }
  1431. /*
  1432. * When allocating the initial buffer target we have not yet
  1433. * read in the superblock, so we don't know what sector size is
  1434. * being used at this early stage. Play safe.
  1435. */
  1436. STATIC int
  1437. xfs_setsize_buftarg_early(
  1438. xfs_buftarg_t *btp,
  1439. struct block_device *bdev)
  1440. {
  1441. return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
  1442. }
  1443. xfs_buftarg_t *
  1444. xfs_alloc_buftarg(
  1445. struct xfs_mount *mp,
  1446. struct block_device *bdev)
  1447. {
  1448. xfs_buftarg_t *btp;
  1449. btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
  1450. btp->bt_mount = mp;
  1451. btp->bt_dev = bdev->bd_dev;
  1452. btp->bt_bdev = bdev;
  1453. btp->bt_bdi = blk_get_backing_dev_info(bdev);
  1454. if (xfs_setsize_buftarg_early(btp, bdev))
  1455. goto error;
  1456. if (list_lru_init(&btp->bt_lru))
  1457. goto error;
  1458. btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
  1459. btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
  1460. btp->bt_shrinker.seeks = DEFAULT_SEEKS;
  1461. btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
  1462. register_shrinker(&btp->bt_shrinker);
  1463. return btp;
  1464. error:
  1465. kmem_free(btp);
  1466. return NULL;
  1467. }
  1468. /*
  1469. * Add a buffer to the delayed write list.
  1470. *
  1471. * This queues a buffer for writeout if it hasn't already been. Note that
  1472. * neither this routine nor the buffer list submission functions perform
  1473. * any internal synchronization. It is expected that the lists are thread-local
  1474. * to the callers.
  1475. *
  1476. * Returns true if we queued up the buffer, or false if it had already
  1477. * been on the buffer list.
  1478. */
  1479. bool
  1480. xfs_buf_delwri_queue(
  1481. struct xfs_buf *bp,
  1482. struct list_head *list)
  1483. {
  1484. ASSERT(xfs_buf_islocked(bp));
  1485. ASSERT(!(bp->b_flags & XBF_READ));
  1486. /*
  1487. * If the buffer is already marked delwri it is already queued up
  1488. * by someone else for immediate writeout. Just ignore it in that
  1489. * case.
  1490. */
  1491. if (bp->b_flags & _XBF_DELWRI_Q) {
  1492. trace_xfs_buf_delwri_queued(bp, _RET_IP_);
  1493. return false;
  1494. }
  1495. trace_xfs_buf_delwri_queue(bp, _RET_IP_);
  1496. /*
  1497. * If a buffer gets written out synchronously or marked stale while it
  1498. * is on a delwri list we lazily remove it. To do this, the other party
  1499. * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
  1500. * It remains referenced and on the list. In a rare corner case it
  1501. * might get re-added to a delwri list after the synchronous writeout, in
  1502. * which case we just need to re-add the flag here.
  1503. */
  1504. bp->b_flags |= _XBF_DELWRI_Q;
  1505. if (list_empty(&bp->b_list)) {
  1506. atomic_inc(&bp->b_hold);
  1507. list_add_tail(&bp->b_list, list);
  1508. }
  1509. return true;
  1510. }
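/*
 * Illustrative sketch of the protocol described above (bp1/bp2 are
 * placeholders for buffers the caller has locked): queue each buffer while
 * it is locked, drop our lock and reference (the queue takes its own hold),
 * then submit the whole thread-local list in one go.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_relse(bp1);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	xfs_buf_relse(bp2);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */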
  1511. /*
  1512. * Compare function is more complex than it needs to be because
  1513. * the return value is only 32 bits and we are doing comparisons
  1514. * on 64 bit values
  1515. */
  1516. static int
  1517. xfs_buf_cmp(
  1518. void *priv,
  1519. struct list_head *a,
  1520. struct list_head *b)
  1521. {
  1522. struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
  1523. struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
  1524. xfs_daddr_t diff;
  1525. diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
  1526. if (diff < 0)
  1527. return -1;
  1528. if (diff > 0)
  1529. return 1;
  1530. return 0;
  1531. }
  1532. static int
  1533. __xfs_buf_delwri_submit(
  1534. struct list_head *buffer_list,
  1535. struct list_head *io_list,
  1536. bool wait)
  1537. {
  1538. struct blk_plug plug;
  1539. struct xfs_buf *bp, *n;
  1540. int pinned = 0;
  1541. list_for_each_entry_safe(bp, n, buffer_list, b_list) {
  1542. if (!wait) {
  1543. if (xfs_buf_ispinned(bp)) {
  1544. pinned++;
  1545. continue;
  1546. }
  1547. if (!xfs_buf_trylock(bp))
  1548. continue;
  1549. } else {
  1550. xfs_buf_lock(bp);
  1551. }
  1552. /*
  1553. * Someone else might have written the buffer synchronously or
  1554. * marked it stale in the meantime. In that case only the
  1555. * _XBF_DELWRI_Q flag got cleared, and we have to drop the
  1556. * reference and remove it from the list here.
  1557. */
  1558. if (!(bp->b_flags & _XBF_DELWRI_Q)) {
  1559. list_del_init(&bp->b_list);
  1560. xfs_buf_relse(bp);
  1561. continue;
  1562. }
  1563. list_move_tail(&bp->b_list, io_list);
  1564. trace_xfs_buf_delwri_split(bp, _RET_IP_);
  1565. }
  1566. list_sort(NULL, io_list, xfs_buf_cmp);
  1567. blk_start_plug(&plug);
  1568. list_for_each_entry_safe(bp, n, io_list, b_list) {
  1569. bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
  1570. bp->b_flags |= XBF_WRITE | XBF_ASYNC;
  1571. /*
  1572. * we do all IO submission async. This means if we need to wait
  1573. * for IO completion we need to take an extra reference so the
  1574. * buffer is still valid on the other side.
  1575. */
  1576. if (wait)
  1577. xfs_buf_hold(bp);
  1578. else
  1579. list_del_init(&bp->b_list);
  1580. xfs_buf_submit(bp);
  1581. }
  1582. blk_finish_plug(&plug);
  1583. return pinned;
  1584. }
  1585. /*
  1586. * Write out a buffer list asynchronously.
  1587. *
  1588. * This will take the @buffer_list, write all non-locked and non-pinned buffers
  1589. * out and not wait for I/O completion on any of the buffers. This interface
  1590. * is only safely usable for callers that can track I/O completion by higher
  1591. * level means, e.g. AIL pushing as the @buffer_list is consumed in this
  1592. * function.
  1593. */
  1594. int
  1595. xfs_buf_delwri_submit_nowait(
  1596. struct list_head *buffer_list)
  1597. {
  1598. LIST_HEAD (io_list);
  1599. return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
  1600. }
  1601. /*
  1602. * Write out a buffer list synchronously.
  1603. *
  1604. * This will take the @buffer_list, write all buffers out and wait for I/O
  1605. * completion on all of the buffers. @buffer_list is consumed by the function,
  1606. * so callers must have some other way of tracking buffers if they require such
  1607. * functionality.
  1608. */
  1609. int
  1610. xfs_buf_delwri_submit(
  1611. struct list_head *buffer_list)
  1612. {
  1613. LIST_HEAD (io_list);
  1614. int error = 0, error2;
  1615. struct xfs_buf *bp;
  1616. __xfs_buf_delwri_submit(buffer_list, &io_list, true);
  1617. /* Wait for IO to complete. */
  1618. while (!list_empty(&io_list)) {
  1619. bp = list_first_entry(&io_list, struct xfs_buf, b_list);
  1620. list_del_init(&bp->b_list);
  1621. /* locking the buffer will wait for async IO completion. */
  1622. xfs_buf_lock(bp);
  1623. error2 = bp->b_error;
  1624. xfs_buf_relse(bp);
  1625. if (!error)
  1626. error = error2;
  1627. }
  1628. return error;
  1629. }
  1630. int __init
  1631. xfs_buf_init(void)
  1632. {
  1633. xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
  1634. KM_ZONE_HWALIGN, NULL);
  1635. if (!xfs_buf_zone)
  1636. goto out;
  1637. return 0;
  1638. out:
  1639. return -ENOMEM;
  1640. }
  1641. void
  1642. xfs_buf_terminate(void)
  1643. {
  1644. kmem_zone_destroy(xfs_buf_zone);
  1645. }