/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                        bio_iter, advanced_bio);
}

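/*
 * Map a multi-sector read against the L2P table. Sectors that are either
 * unmapped or served from the write buffer are flagged in read_bitmap and
 * their lbas recorded in the sector metadata; the remaining sectors are
 * collected in rqd->ppa_list for a device read.
 */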
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppas[NVM_MAX_VLBA];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                        advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

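/*
 * Sanity check for a sequential read: the lba recorded in each sector's
 * out-of-band metadata must match the lba the sector was read for.
 */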
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_lba_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *p;

                        /* single-sector requests carry the ppa in ppa_addr */
                        p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
                        print_ppa(pblk, p, "seq", i);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int i, j;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta_lba_list[j].lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *p;
                        int nr_ppas = rqd->nr_ppas;

                        /* single-sector requests carry the ppa in ppa_addr */
                        p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
                        print_ppa(pblk, p, "seq", j);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        lba, meta_lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

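/*
 * Complete the original user bio; in debug builds, warn if it carries an
 * error status.
 */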
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
}

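/*
 * Common completion path for internal read requests: account the I/O,
 * log device errors, verify the returned lbas, drop the cloned bio and,
 * if requested, the line references, then release the rqd.
 */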
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);

        if (int_bio)
                bio_put(int_bio);

        if (put_line)
                pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio);
        __pblk_end_io_read(pblk, rqd, true);
}

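/*
 * Completion for a partial read: the holes read from the device are copied
 * from the internal bio into the original bio, line references taken at
 * mapping time are dropped, and the original request is restored before it
 * is completed.
 */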
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_pr_ctx *pr_ctx = r_ctx->private;
        struct bio *new_bio = rqd->bio;
        struct bio *bio = pr_ctx->orig_bio;
        struct bio_vec src_bv, dst_bv;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        int bio_init_idx = pr_ctx->bio_init_idx;
        unsigned long *read_bitmap = pr_ctx->bitmap;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        __le64 *lba_list_mem, *lba_list_media;
        void *src_p, *dst_p;
        int hole, i;

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = pr_ctx->ppa_ptr;
                rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                struct pblk_line *line;

                line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);
        kfree(pr_ctx);

        /* restore original request */
        rqd->bio = NULL;
        rqd->nr_ppas = nr_secs;

        bio_endio(bio);
        __pblk_end_io_read(pblk, rqd, false);
}

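/*
 * Prepare a partial read: allocate an internal bio backed by pages from the
 * page pool for the holes, stash the original lbas and bio in a pblk_pr_ctx,
 * and rewire the rqd so that only the holes are read from the device.
 */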
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                            unsigned int bio_init_idx,
                            unsigned long *read_bitmap,
                            int nr_holes)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_pr_ctx *pr_ctx;
        struct bio *new_bio, *bio = r_ctx->private;
        __le64 *lba_list_mem;
        int nr_secs = rqd->nr_ppas;
        int i;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto fail_bio_put;

        if (nr_holes != new_bio->bi_vcnt) {
                WARN_ONCE(1, "pblk: malformed bio\n");
                goto fail_free_pages;
        }

        pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
        if (!pr_ctx)
                goto fail_free_pages;

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;

        pr_ctx->ppa_ptr = NULL;
        pr_ctx->orig_bio = bio;
        bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
        pr_ctx->bio_init_idx = bio_init_idx;
        pr_ctx->orig_nr_secs = nr_secs;
        r_ctx->private = pr_ctx;

        if (unlikely(nr_holes == 1)) {
                pr_ctx->ppa_ptr = rqd->ppa_list;
                pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }
        return 0;

fail_free_pages:
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
        bio_put(new_bio);

        return -ENOMEM;
}

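/*
 * Submit the device read for the holes of a partially cached request. On
 * submission failure the pages of the internal bio are released and the
 * request is completed with an error.
 */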
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                 unsigned int bio_init_idx,
                                 unsigned long *read_bitmap, int nr_secs)
{
        int nr_holes;
        int ret;

        nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);

        if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
                                    nr_holes))
                return NVM_IO_ERR;

        rqd->end_io = pblk_end_partial_read;

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pblk_err(pblk, "partial read IO submission failed\n");
                goto err;
        }

        return NVM_IO_OK;

err:
        pblk_err(pblk, "failed to perform partial read\n");

        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}

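/*
 * Single-sector counterpart of pblk_read_ppalist_rq: look up one lba and
 * either serve it from the write buffer (setting bit 0 of read_bitmap) or
 * point rqd->ppa_addr at the device sector to be read.
 */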
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }
}

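/*
 * Entry point for user reads. The request is mapped against the write buffer
 * and the L2P table; fully cached requests complete immediately, fully
 * uncached requests are submitted to the device on a cloned bio, and mixed
 * requests take the partial read path.
 */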
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct request_queue *q = dev->q;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
        int ret = NVM_IO_ERR;

        generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
                              &pblk->disk->part0);

        bitmap_zero(read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->bio = NULL; /* cloned bio if needed */
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = jiffies;
        r_ctx->lba = blba;
        r_ctx->private = bio; /* original bio */

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pblk_err(pblk, "not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
        }

        if (bitmap_full(read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_DONE;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                if (!int_bio) {
                        pblk_err(pblk, "could not clone read bio\n");
                        goto fail_end_io;
                }

                rqd->bio = int_bio;

                if (pblk_submit_io(pblk, rqd)) {
                        pblk_err(pblk, "read IO submission failed\n");
                        ret = NVM_IO_ERR;
                        goto fail_end_io;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
                                    nr_secs);
        if (ret)
                goto fail_meta_free;

        return NVM_IO_OK;

fail_meta_free:
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
fail_end_io:
        __pblk_end_io_read(pblk, rqd, false);
        return ret;
}

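/*
 * Build the ppa list for a multi-sector GC read. Sectors whose current L2P
 * mapping no longer matches the ppa recorded by GC have been overwritten in
 * the meantime and are dropped from the request.
 */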
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

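/*
 * Single-sector counterpart of read_ppalist_rq_gc: validate one lba against
 * the L2P table and, if it is still mapped to the GC'd sector, set
 * rqd->ppa_addr for the read.
 */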
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

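/*
 * Synchronous read used by the garbage collector: validate the victim
 * sectors against the L2P table, read the ones still valid into the GC
 * buffer, and verify the returned lbas.
 */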
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->csecs;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pblk_err(pblk, "could not allocate GC bio (%lu)\n",
                                                                PTR_ERR(bio));
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pblk_err(pblk, "GC read request failed\n");
                goto err_free_bio;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}