/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

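/* Complete the original user bios attached to each synced write buffer entry,
 * advance the write buffer sync pointer and release the request's DMA
 * metadata, bio and rqd. Returns the new sync position.
 */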
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

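/* Writes must be completed in order with respect to the write buffer sync
 * pointer. If this request starts at the current sync position, complete it
 * and drain any queued completions that become contiguous; otherwise, park it
 * on the completion list until the requests before it have completed.
 */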
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

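/* Completion path for data writes: on error, hand the failed sectors over to
 * the recovery path, which completes the request once it has queued the
 * recovery work; otherwise complete the request in order.
 */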
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

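/* Completion path for emeta (line metadata) writes. Once all emeta sectors of
 * a line have synced, schedule the work that closes the line.
 */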
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
								pblk->close_wq);

	bio_put(rqd->bio);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, READ);

	atomic_dec(&pblk->inflight_io);
}

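/* Fill in the fields common to all write requests and allocate the DMA region
 * for the out-of-band metadata; the PPA list is carved out of the same
 * allocation, right after the metadata.
 */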
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

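/* Set up a write request for the valid and padded sectors taken from the
 * write buffer and map them to physical addresses. If a line is pending an
 * erase and still has blocks left to erase, use the erase-aware mapping so
 * that an erase can be scheduled together with this write.
 */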
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

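/* Calculate how many sectors to send down to the device, sanity-checking the
 * result against the available and flush-requested sector counts when
 * debugging is enabled.
 */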
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

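/* Check that the next metadata PPA for @meta_line does not clash with the
 * LUNs addressed by the data I/O described by @ppa_list, so that taking the
 * per-LUN semaphores for both I/Os cannot deadlock. Returns 1 if the metadata
 * I/O can be scheduled.
 */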
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
					ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

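/* Build and submit a write for the next emeta chunk of @meta_line. Note that
 * the request is drawn from the READ rqd pool, matching
 * pblk_end_io_write_meta(), which returns it to that pool.
 */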
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);

		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);

		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* If the line was taken off the emeta list above (all of its emeta
	 * sectors had been mapped), put it back so the write can be retried,
	 * and rewind the emeta buffer over the chunk that was not written.
	 */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	emeta->mem -= rq_len;
	spin_unlock(&l_mg->close_lock);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

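/* If a line is waiting to have its emeta persisted and its PPAs do not
 * conflict with the data I/O about to be issued, submit the metadata write.
 */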
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

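/* Submit a formed write request: map it to the media, piggyback a metadata
 * write for a previous line when no erase is needed, or kick off the pending
 * erase for the next data line otherwise.
 */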
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

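/* Form a write request from the sectors in the write buffer and submit it to
 * the media. Returns 0 if a request was submitted and 1 if there was nothing
 * to write or the submission failed.
 */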
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared by the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

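/* Write thread: keep draining the write buffer while there is work to do and
 * sleep otherwise until woken up again.
 */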
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}