/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We do guarantee though that
 * if the value is read from the cache, it belongs to the mapped lba. To
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                   bio_iter, advanced_bio);
}
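
/*
 * Resolve a multi-sector read: look up the L2P mapping for each lba,
 * serve what is possible from the write buffer, and collect the
 * remaining ppas in rqd->ppa_list for a device read. Sectors satisfied
 * from the cache (or unmapped) are marked in read_bitmap.
 */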
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                  advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
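
/*
 * Sanity check a sequential read: the lbas stored in the out-of-band
 * sector metadata returned by the device must match the lbas requested.
 */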
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_lba_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;

                        /* A single-sector request carries its ppa inline in
                         * ppa_addr; only multi-sector requests use ppa_list.
                         */
                        p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
                        print_ppa(&pblk->dev->geo, p, "seq", i);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes in the lba list: entries marked ADDR_EMPTY are
 * skipped, and the sector metadata is indexed only over the lbas that
 * were actually read.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int i, j;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta_lba_list[j].lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;
                        int nr_ppas = rqd->nr_ppas;

                        p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
                        print_ppa(&pblk->dev->geo, p, "rand", j);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                        lba, meta_lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
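
/*
 * Drop the reference taken on each line addressed by the request; lines
 * are kept alive while reads to them are in flight.
 */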
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int i;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr ppa = ppa_list[i];
                struct pblk_line *line;

                line = &pblk->lines[pblk_ppa_to_line(ppa)];
                kref_put(&line->ref, pblk_line_put_wq);
        }
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
}
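
/*
 * Common read completion: account the I/O, log device errors, verify
 * the returned lbas, and release the request (and, if requested, the
 * line references it holds).
 */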
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);

        if (int_bio)
                bio_put(int_bio);

        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio);
        __pblk_end_io_read(pblk, rqd, true);
}
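
/*
 * Complete a read that was only partially served from the write buffer:
 * read the missing sectors ("holes") synchronously from the device into
 * a new bio and copy the data back into the original bio.
 */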
static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                             struct bio *orig_bio, unsigned int bio_init_idx,
                             unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *new_bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        __le64 *lba_list_mem, *lba_list_media;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto fail_add_pages;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto fail;
        }

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        /* A single-sector request carries its ppa inline in ppa_addr.
         * Save the list pointers so they can be restored afterwards.
         */
        if (unlikely(nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: sync read IO submission failed\n");
                goto fail;
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
                struct pblk_line *line = &pblk->lines[line_id];

                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* restore original request */
        rqd->bio = NULL;
        rqd->nr_ppas = nr_secs;

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_DONE;

fail:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_add_pages:
        pr_err("pblk: failed to perform partial read\n");
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}
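
/*
 * Single-sector counterpart of pblk_read_ppalist_rq: resolve one lba
 * and either serve it from the write buffer or set up a device read.
 */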
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
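
/*
 * Entry point for the user read path. Depending on how many sectors the
 * write buffer can serve, the request completes right away (all cached),
 * is submitted to the device as a whole (none cached), or is split
 * through pblk_partial_read().
 */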
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct request_queue *q = dev->q;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        int ret = NVM_IO_ERR;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
                                        (unsigned long long)blba, nr_secs);
                return NVM_IO_ERR;
        }

        generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->bio = NULL; /* cloned bio if needed */
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = jiffies;
        r_ctx->lba = blba;
        r_ctx->private = bio; /* original bio */

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                           &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
        }

        if (bitmap_full(&read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_DONE;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        goto fail_end_io;
                }

                rqd->bio = int_bio;

                if (pblk_submit_io(pblk, rqd)) {
                        pr_err("pblk: read IO submission failed\n");
                        ret = NVM_IO_ERR;
                        goto fail_end_io;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
fail_end_io:
        __pblk_end_io_read(pblk, rqd, false);
        return ret;
}
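
/*
 * Build the ppa list for a multi-sector GC read. Sectors whose L2P
 * mapping no longer points to the line being collected have been
 * overwritten in the meantime and are dropped from the request.
 */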
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}
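
/*
 * Single-sector GC read: only valid if the lba still maps to the
 * address being garbage collected.
 */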
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}
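
/*
 * Synchronous read used by the garbage collector to pull the still
 * valid sectors of a line into gc_rq->data so they can be rewritten.
 */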
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                          &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                       gc_rq->lba_list,
                                                       gc_rq->paddr_list,
                                                       gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                               gc_rq->lba_list[0],
                                               gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->csecs;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n",
                                                        PTR_ERR(bio));
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pr_err("pblk: GC read request failed\n");
                goto err_free_bio;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}