pblk-read.c

/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"
/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We do guarantee, though, that if
 * the value is read from the cache, it belongs to the mapped lba. To guarantee
 * ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
				   bio_iter, advanced_bio);
}
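
/*
 * Build the ppa list for a multi-sector read. For every sector we either
 * copy the data straight out of the write buffer (and set its bit in
 * read_bitmap) or queue its physical address in rqd->ppa_list so it is
 * read from the device. A set bit therefore means "already satisfied";
 * the remaining zero bits are the holes the device has to fill.
 */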
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 sector_t blba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
			    sector_t blba)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		WARN(lba != blba + i, "pblk: corrupted read LBA\n");
	}
}
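
/*
 * Drop the per-line references held by this request. Each device-resident
 * sector read by the request pins its line so the line cannot be recycled
 * while the read is in flight; release those references on completion.
 */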
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
	bio_put(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	pblk_read_check(pblk, rqd, r_ctx->lba);

	bio_put(bio);
	if (r_ctx->private)
		pblk_end_user_read((struct bio *)r_ctx->private);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_read(pblk, rqd, true);
}
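
/*
 * Handle a partial read: some sectors were already served from the write
 * buffer, the rest (the zero bits in read_bitmap) must come from the device.
 * The strategy is to allocate a private bio just big enough for the holes,
 * issue it synchronously, and then copy each page back into the right slot
 * of the original user bio before completing it.
 */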
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	/* A single-sector request carries its address in ppa_addr; save the
	 * ppa_list pointer so it can be restored after the sync I/O.
	 */
	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		goto err;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	/* Restore the original ppa_list now that the sync read has completed */
	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	bio_endio(bio);
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_OK;

err:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}
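
/* Single-sector variant of pblk_read_ppalist_rq(): the address goes in
 * rqd->ppa_addr instead of a ppa list, but the cache-vs-device decision and
 * the read_bitmap bookkeeping are the same.
 */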
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
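
/*
 * Entry point for user reads; the target's make_request path in pblk-init.c
 * is expected to dispatch READ bios here, roughly (sketch only, assuming the
 * usual pblk_make_rq() wiring):
 *
 *	if (bio_data_dir(bio) == READ)
 *		ret = pblk_submit_read(pblk, bio);
 *
 * Three outcomes are possible:
 *  - all sectors were served from the write buffer (or are unmapped):
 *    complete the bio right away;
 *  - no sector was cached: clone the bio and submit the whole request to
 *    the device asynchronously;
 *  - a mix of both: fall through to pblk_fill_partial_read_bio().
 */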
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->lba = blba;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, blba, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			return NVM_IO_ERR;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			return ret;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
}
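
/*
 * GC reads: before pulling victim data off a line, re-check the L2P table.
 * If the mapping no longer points at the physical address GC recorded
 * (paddr_list_gc), the sector was rewritten by the host in the meantime and
 * is dropped from the GC request by marking it ADDR_EMPTY.
 */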
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}