/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba,
				pblk_addr_to_cacheline(ppa), bio_iter);
}

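/* Resolve the L2P mapping for a multi-sector read. Sectors found in the
 * write buffer are copied into the bio right away and marked in
 * @read_bitmap; the rest are collected in rqd->ppa_list for a device read.
 */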
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	sector_t blba = pblk_get_lba(bio);
	int nr_secs = rqd->nr_ppas;
	int advanced_bio = 0;
	int i, j = 0;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba + nr_secs >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lbas out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			continue;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			advanced_bio = 1;
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

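/* Set the read command mode flags and hand the request to the device */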
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	rqd->flags = pblk_set_read_mode(pblk);

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

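/* Completion path for device reads: log errors, release the ppa list and
 * the internal bio, complete the original bio if the request was cloned,
 * and return the rqd to its pool.
 */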
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n");
#endif

	if (rqd->nr_ppas > 1)
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	bio_put(bio);
	if (r_ctx->orig_bio) {
#ifdef CONFIG_NVM_DEBUG
		WARN_ONCE(r_ctx->orig_bio->bi_error,
						"pblk: corrupted read bio\n");
#endif
		bio_endio(r_ctx->orig_bio);
		bio_put(r_ctx->orig_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, READ);
}

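/* A partial read hit the write buffer for some sectors only. Allocate an
 * internal bio for the missing sectors (the zero bits - "holes" - in
 * @read_bitmap), read them synchronously from the device and copy the
 * data back into the original bio before completing it.
 */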
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;
	DECLARE_COMPLETION_ONSTACK(wait);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
	if (!new_bio) {
		pr_err("pblk: could not alloc read bio\n");
		return NVM_IO_ERR;
	}

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
	new_bio->bi_private = &wait;
	new_bio->bi_end_io = pblk_end_bio_sync;

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->end_io = NULL;

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_read_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: read IO submission failed\n");
		goto err;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: partial read I/O timed out\n");
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;

	bio_endio(bio);
	pblk_end_io_read(rqd);
	return NVM_IO_OK;

err:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
	rqd->private = pblk;
	pblk_end_io_read(rqd);
	return NVM_IO_ERR;
}

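/* Single-sector counterpart of pblk_read_ppalist_rq: look up one lba and
 * either serve it from the write buffer or set up rqd->ppa_addr for a
 * device read.
 */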
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;
	sector_t lba = pblk_get_lba(bio);

	/* logic error: lba out-of-bounds. Ignore read request */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
	} else {
		rqd->ppa_addr = ppa;
	}
}

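/* Entry point for the read path. Depending on how many sectors hit the
 * write buffer, the request either completes immediately, is submitted
 * to the device as a whole, or is handled as a partial read.
 */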
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	unsigned int nr_secs = pblk_get_secs(bio);
	struct nvm_rq *rqd;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	unsigned int bio_init_idx;
	int ret = NVM_IO_ERR;

	if (nr_secs > PBLK_MAX_REQ_ADDRS)
		return NVM_IO_ERR;

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err_ratelimited("pblk: not able to alloc rqd\n");
		return NVM_IO_ERR;
	}

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	if (nr_secs > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
						&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("pblk: not able to allocate ppa list\n");
			goto fail_rqd_free;
		}

		pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		pblk_end_io_read(rqd);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;
		struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_bioset(bio, GFP_KERNEL, fs_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			return NVM_IO_ERR;
		}

		rqd->bio = int_bio;
		r_ctx->orig_bio = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			return ret;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

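/* Build the ppa list for a multi-sector GC read. Lbas that have been
 * updated since GC selected them (now in cache, on another line, or
 * emptied) are marked ADDR_EMPTY and skipped.
 */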
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      unsigned int nr_secs)
{
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
						pblk_ppa_empty(ppas[i])) {
			lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppas[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

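/* Single-sector counterpart of read_ppalist_rq_gc */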
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba)
{
	struct ppa_addr ppa;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Ignore the sector if it has been updated since GC selected it */
	if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
							pblk_ppa_empty(ppa))
		goto out;

	rqd->ppa_addr = ppa;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

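/* Read the still-valid sectors in @lba_list from @line into @data on
 * behalf of the garbage collector. The read is issued synchronously;
 * sectors that no longer reside on @line are skipped.
 */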
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *q = dev->q;
	struct bio *bio;
	struct nvm_rq rqd;
	int ret, data_len;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	if (nr_secs > 1) {
		rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list)
			return NVM_IO_ERR;

		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
								nr_secs);
		if (*secs_to_gc == 1) {
			struct ppa_addr ppa;

			ppa = rqd.ppa_list[0];
			nvm_dev_dma_free(dev->parent, rqd.ppa_list,
							rqd.dma_ppa_list);
			rqd.ppa_addr = ppa;
			rqd.ppa_list = NULL;
		}
	} else {
		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
	}

	if (!(*secs_to_gc))
		goto out;

	data_len = (*secs_to_gc) * geo->sec_size;
	bio = bio_map_kern(q, data, data_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;
	rqd.nr_ppas = *secs_to_gc;
	rqd.bio = bio;

	ret = pblk_submit_read_io(pblk, &rqd);
	if (ret) {
		bio_endio(bio);
		pr_err("pblk: GC read request failed\n");
		goto err_free_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: GC read I/O timed out\n");
	}

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(*secs_to_gc, &pblk->sync_reads);
	atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
	/* Free the ppa list only if this request still owns it */
	if (rqd.ppa_list)
		nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
	return NVM_IO_OK;

err_free_dma:
	if (rqd.ppa_list)
		nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
	return NVM_IO_ERR;
}