/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}
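
/*
 * Resolve the L2P mapping for a multi-sector read: sectors found in the write
 * buffer are copied into the bio and marked in read_bitmap; the remaining
 * sectors are collected in rqd->ppa_list to be read from the device.
 */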
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	sector_t blba = pblk_get_lba(bio);
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba + nr_secs >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lbas out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
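
/* Thin wrapper translating pblk_submit_io() errors into NVM_IO_* codes */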
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}
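
/*
 * Completion path for device reads: log errors, release the per-request DMA
 * metadata and bios (including the original user bio saved in the request
 * context, if any), and return the rqd to its pool.
 */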
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(bio);
	if (r_ctx->private) {
		struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
		bio_endio(orig_bio);
		bio_put(orig_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, READ);
	atomic_dec(&pblk->inflight_io);
}
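
/*
 * Handle a partial read: some sectors were served from the write buffer and
 * the remaining "holes" must come from the device. Build an internal bio for
 * the holes, issue it synchronously, then copy the read pages back into the
 * original bio at the positions recorded in read_bitmap.
 */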
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;
	DECLARE_COMPLETION_ONSTACK(wait);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
	if (!new_bio) {
		pr_err("pblk: could not alloc read bio\n");
		return NVM_IO_ERR;
	}

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
	new_bio->bi_private = &wait;
	new_bio->bi_end_io = pblk_end_bio_sync;

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd->end_io = NULL;

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_read_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: read IO submission failed\n");
		goto err;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: partial read I/O timed out\n");
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;

	bio_endio(bio);
	pblk_end_io_read(rqd);
	return NVM_IO_OK;

err:
	/* Free the pages allocated for the internal bio, not the caller's */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
	rqd->private = pblk;
	pblk_end_io_read(rqd);
	return NVM_IO_ERR;
}
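
/* Single-sector counterpart of pblk_read_ppalist_rq() */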
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;
	sector_t lba = pblk_get_lba(bio);

	/* logic error: lba out-of-bounds. Ignore read request */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
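
/*
 * Entry point for the read path. Depending on how many sectors could be
 * served from the write buffer, the request is completed immediately, sent to
 * the device as-is (on a cloned bio), or handled as a partial read.
 */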
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	unsigned int nr_secs = pblk_get_secs(bio);
	struct nvm_rq *rqd;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	unsigned int bio_init_idx;
	int ret = NVM_IO_ERR;

	if (nr_secs > PBLK_MAX_REQ_ADDRS)
		return NVM_IO_ERR;

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err_ratelimited("pblk: not able to alloc rqd");
		return NVM_IO_ERR;
	}

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		pblk_end_io_read(rqd);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;
		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			return NVM_IO_ERR;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			return ret;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}
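
/*
 * Resolve the mappings for a GC read. Only sectors that still reside on the
 * victim line are kept; lbas that have moved (to the cache or to another
 * line) are marked ADDR_EMPTY so GC skips them.
 */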
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      unsigned int nr_secs)
{
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
						pblk_ppa_empty(ppas[i])) {
			lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppas[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
	return valid_secs;
}
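
/* Single-sector counterpart of read_ppalist_rq_gc() */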
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba)
{
	struct ppa_addr ppa;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Ignore the sector if it has been updated in the meantime: it now
	 * lives in the cache or on another line.
	 */
	if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
							pblk_ppa_empty(ppa))
		goto out;

	rqd->ppa_addr = ppa;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}
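
/*
 * Synchronous read used by the garbage collector to move valid data off a
 * victim line before it is erased.
 */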
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int ret, data_len;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return NVM_IO_ERR;

	if (nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
								nr_secs);
		if (*secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
	}

	if (!(*secs_to_gc))
		goto out;

	data_len = (*secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
						PBLK_KMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;
	rqd.nr_ppas = *secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	ret = pblk_submit_read_io(pblk, &rqd);
	if (ret) {
		bio_endio(bio);
		pr_err("pblk: GC read request failed\n");
		goto err_free_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: GC read I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(*secs_to_gc, &pblk->sync_reads);
	atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_OK;

err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_ERR;
}