/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"
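
/*
 * Overview of the read path: pblk_submit_read() resolves each logical sector
 * through the L2P table and serves it either from the write buffer (cache)
 * or from the device. Requests fully hit in cache complete immediately;
 * requests fully on media are submitted as a cloned bio; mixed requests go
 * through pblk_fill_partial_read_bio(), which reads only the missing sectors
 * and splices them back into the original bio.
 */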

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
}
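
/*
 * Resolve a multi-sector read: look up all lbas in one L2P pass, then serve
 * each sector from the write buffer when possible, or queue its physical
 * address for the device read. A set bit in @read_bitmap marks a sector that
 * is already satisfied (empty mapping or cache hit).
 */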
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        sector_t blba = pblk_get_lba(bio);
        int nr_secs = rqd->nr_ppas;
        int advanced_bio = 0;
        int i, j = 0;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba + nr_secs >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lbas out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        continue;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        advanced_bio = 1;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
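
/* Map the core submission result onto pblk's NVM_IO_* return codes */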
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        int err;

        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;

        return NVM_IO_OK;
}
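
/*
 * Completion handler for device reads: logs errors, releases the DMA
 * metadata and the internal bio, and, when the request was cloned
 * (r_ctx->private holds the original bio), completes the user bio too.
 */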
static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

        bio_put(bio);
        if (r_ctx->private) {
                struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
                WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
                bio_endio(orig_bio);
                bio_put(orig_bio);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, READ);
        atomic_dec(&pblk->inflight_io);
}
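
/*
 * Serve the sectors that the write buffer could not provide. A zero bit in
 * @read_bitmap is a "hole" that must be read from media: e.g. for nr_secs = 4
 * and read_bitmap = 0b0101, sectors 0 and 2 were cache hits, while sectors 1
 * and 3 are read here through a temporary bio and then copied back into the
 * original bio starting at @bio_init_idx.
 */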
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                      unsigned int bio_init_idx,
                                      unsigned long *read_bitmap)
{
        struct bio *new_bio, *bio = rqd->bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;
        DECLARE_COMPLETION_ONSTACK(wait);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);
        if (!new_bio) {
                pr_err("pblk: could not alloc read bio\n");
                return NVM_IO_ERR;
        }

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto err;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto err;
        }

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
        new_bio->bi_private = &wait;
        new_bio->bi_end_io = pblk_end_bio_sync;

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd->end_io = NULL;

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_read_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: read IO submission failed\n");
                goto err;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: partial read I/O timed out\n");
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, pblk->page_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* Complete the original bio and associated request */
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;

        bio_endio(bio);
        pblk_end_io_read(rqd);
        return NVM_IO_OK;

err:
        /* Free allocated pages in new bio; the pages in the original bio
         * belong to the caller and must not be released here.
         */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
        rqd->private = pblk;
        pblk_end_io_read(rqd);
        return NVM_IO_ERR;
}
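
/*
 * Single-sector variant of pblk_read_ppalist_rq(): resolve one lba and
 * either serve it from cache or set rqd->ppa_addr for the device read.
 */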
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppa;
        sector_t lba = pblk_get_lba(bio);

        /* logic error: lba out-of-bounds. Ignore read request */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }
                WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
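
/*
 * Entry point for the read path; pblk's make_rq handler (in pblk-init.c)
 * routes READ bios here. A minimal caller sketch, for illustration only:
 *
 *	ret = pblk_submit_read(pblk, bio);
 *	if (ret == NVM_IO_ERR)
 *		bio_io_error(bio);
 */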
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        unsigned int nr_secs = pblk_get_secs(bio);
        struct nvm_rq *rqd;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        unsigned int bio_init_idx;
        int ret = NVM_IO_ERR;

        if (nr_secs > PBLK_MAX_REQ_ADDRS)
                return NVM_IO_ERR;

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd)) {
                pr_err_ratelimited("pblk: not able to alloc rqd");
                return NVM_IO_ERR;
        }

        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                           &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, &read_bitmap);
        }

        bio_get(bio);
        if (bitmap_full(&read_bitmap, nr_secs)) {
                bio_endio(bio);
                atomic_inc(&pblk->inflight_io);
                pblk_end_io_read(rqd);
                return NVM_IO_OK;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;
                struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
                }

                rqd->bio = int_bio;
                r_ctx->private = bio;

                ret = pblk_submit_read_io(pblk, rqd);
                if (ret) {
                        pr_err("pblk: read IO submission failed\n");
                        if (int_bio)
                                bio_put(int_bio);
                        return ret;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
        if (ret) {
                pr_err("pblk: failed to perform partial read\n");
                return ret;
        }

        return NVM_IO_OK;

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, READ);
        return ret;
}
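
/*
 * GC batch lookup: keep only lbas whose mapping still points at @line (not
 * cached, not remapped, not empty); invalidated entries are marked
 * ADDR_EMPTY in @lba_list so the GC skips them.
 */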
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              unsigned int nr_secs)
{
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
                                                pblk_ppa_empty(ppas[i])) {
                        lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppas[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}
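
/* Single-lba counterpart of read_ppalist_rq_gc() */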
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba)
{
        struct ppa_addr ppa;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Ignore mappings that have been updated in the meantime */
        if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
                                                pblk_ppa_empty(ppa))
                goto out;

        rqd->ppa_addr = ppa;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}
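
/*
 * Synchronous read on behalf of the garbage collector (see pblk-gc.c):
 * fetch up to @nr_secs still-valid sectors from @line into @data and
 * report in @secs_to_gc how many are left to move.
 */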
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
                        unsigned int nr_secs, unsigned int *secs_to_gc,
                        struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int ret, data_len;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                          &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return NVM_IO_ERR;

        if (nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
                                                 nr_secs);
                if (*secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
        }

        if (!(*secs_to_gc))
                goto out;

        data_len = (*secs_to_gc) * geo->sec_size;
        bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
                                PBLK_KMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
        rqd.nr_ppas = *secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        ret = pblk_submit_read_io(pblk, &rqd);
        if (ret) {
                bio_endio(bio);
                pr_err("pblk: GC read request failed\n");
                goto err_free_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: GC read I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(*secs_to_gc, &pblk->sync_reads);
        atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return NVM_IO_OK;

err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return NVM_IO_ERR;
}