/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

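/* End the user bios attached to the valid write buffer entries covered by a
 * completed write request, free any pages used for padding, and advance the
 * buffer's sync pointer. Returns the new sync position.
 */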
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct bio *original_bio;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;

                w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                    c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}

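/* Write completions must be processed in ring buffer order. If this request
 * is the one the sync pointer is waiting for, complete it along with any
 * queued completions that become contiguous as a result; otherwise queue it
 * on compl_list until the requests ahead of it have completed.
 */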
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        void *comp_bits = &rqd->ppa_status;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rec_ctx *recovery;
        struct ppa_addr *ppa_list = rqd->ppa_list;
        int nr_ppas = rqd->nr_ppas;
        unsigned int c_entries;
        int bit, ret;

        if (unlikely(nr_ppas == 1))
                ppa_list = &rqd->ppa_addr;

        recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);

        INIT_LIST_HEAD(&recovery->failed);

        bit = -1;
        while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
                struct pblk_rb_entry *entry;
                struct ppa_addr ppa;

                /* Logic error */
                if (bit > c_ctx->nr_valid) {
                        WARN_ONCE(1, "pblk: corrupted write request\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                ppa = ppa_list[bit];
                entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
                if (!entry) {
                        pr_err("pblk: could not scan entry on write failure\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                /* The list is filled first and emptied afterwards. No need for
                 * protecting it with a lock
                 */
                list_add_tail(&entry->index, &recovery->failed);
        }

        c_entries = find_first_bit(comp_bits, nr_ppas);
        ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
        if (ret) {
                pr_err("pblk: could not recover from write failure\n");
                mempool_free(recovery, pblk->rec_pool);
                goto out;
        }

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);

out:
        pblk_complete_write(pblk, rqd, c_ctx);
}

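/* Completion callback for user data writes. Failed requests enter the
 * recovery path above before being completed against the write buffer.
 */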
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                return pblk_end_w_fail(pblk, rqd);
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}

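/* Completion callback for emeta writes. When the last emeta sector of the
 * line syncs, line close is scheduled on the close workqueue.
 */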
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        int sync;

        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
        }

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                GFP_ATOMIC, pblk->close_wq);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
}

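/* Common setup for write requests: opcode, flags, completion callback, and
 * the DMA region backing the metadata and PPA lists.
 */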
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs,
                           nvm_end_io_fn(*end_io))
{
        struct nvm_tgt_dev *dev = pblk->dev;

        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                           &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

        return 0;
}

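/* Set up a user data write: allocate the LUN bitmap and the request proper,
 * then map the buffer entries to physical addresses. If a line is set up for
 * erase and still has blocks left to erase, map through the erase-aware path
 * so an erase can be paired with this write.
 */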
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                  valid, erase_ppa);

        return 0;
}

int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx)
{
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;

        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
        if (ret) {
                /* Don't leak the LUN bitmap on failure */
                kfree(lun_bitmap);
                return ret;
        }

        pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

        rqd->ppa_status = (u64)0;
        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);

        return ret;
}

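/* Calculate how many sectors to write in this pass, given the sectors
 * available in the write buffer and the sectors that a flush, if any, forces
 * out. The debug check below guards against inconsistent results.
 */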
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}

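/* Write the next min_write_pgs-sized chunk of a line's end metadata (emeta)
 * to the media. Once the whole emeta buffer has been scheduled, the line is
 * taken off the emeta list so no further metadata I/Os are issued for it.
 */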
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->sec_size;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0]) {
                spin_lock(&l_mg->close_lock);
                list_del(&meta_line->list);
                spin_unlock(&l_mg->close_lock);
        }

        pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        /* Re-queue the line on the emeta list if it was removed above; the
         * original code added the list node after itself, which corrupts the
         * list.
         */
        if (emeta->mem >= lm->emeta_len[0])
                list_add(&meta_line->list, &l_mg->emeta_list);
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
                                       struct pblk_line *meta_line,
                                       struct nvm_rq *data_rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
        struct pblk_line *data_line = pblk_line_get_data(pblk);
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int pos_opt;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regards to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in metadata and data I/Os colliding.
         * In this case, modify the distance so that it is no longer optimal,
         * but moves toward the optimal in the right direction.
         */
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

        if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
                                test_bit(pos_opt, data_line->blk_bitmap))
                return true;

        if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
                data_line->meta_distance--;

        return false;
}

static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
                                                    struct nvm_rq *data_rqd)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
retry:
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (meta_line->emeta->mem >= lm->emeta_len[0])
                goto retry;
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
                return NULL;

        return meta_line;
}

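/* Submit a user data write and, when due, pair it with an erase for the next
 * data line and an emeta write for a previous line.
 */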
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr erase_ppa;
        struct pblk_line *meta_line;
        int err;

        ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
        if (err) {
                pr_err("pblk: could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        meta_line = pblk_should_submit_meta_io(pblk, rqd);

        /* Submit data write for current data line */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                pr_err("pblk: data I/O submission failed: %d\n", err);
                return NVM_IO_ERR;
        }

        if (!ppa_empty(erase_ppa)) {
                /* Submit erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        if (meta_line) {
                /* Submit metadata write for previous data line */
                err = pblk_submit_meta_io(pblk, meta_line);
                if (err) {
                        pr_err("pblk: metadata I/O submission failed: %d\n",
                                                                        err);
                        return NVM_IO_ERR;
                }
        }

        return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
                                    c_ctx->nr_padded);
}

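/* Form a write request from the entries committed in the write buffer and
 * submit it. Returns 0 when a request was submitted and 1 when there was
 * nothing to do or the submission failed.
 */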
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;

        /* If there are no sectors in the cache, flushes (bios without data)
         * are completed directly on the cache threads
         */
        secs_avail = pblk_rb_read_count(&pblk->rwb);
        if (!secs_avail)
                return 1;

        secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
        if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                return 1;

        secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
        if (secs_to_sync > pblk->max_write_pgs) {
                pr_err("pblk: bad buffer sync calculation\n");
                return 1;
        }

        secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
        pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

        bio = bio_alloc(GFP_KERNEL, secs_to_sync);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
                                                        secs_avail)) {
                pr_err("pblk: corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return 1;
}

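/* Write thread: submit buffered data as long as there is work to do; sleep
 * otherwise until woken up (e.g., when new data hits the write buffer).
 */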
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}