pblk-write.c

/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
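
/* Complete the write buffer entries covered by a finished write request:
 * clear any flush flags, end the bios chained on each entry, free the pages
 * that were added as padding, advance the write buffer's sync pointer and
 * release the request. Returns the new sync position in the ring buffer.
 */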
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}
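
/* Write completions must be processed in the order in which their entries
 * sit in the write buffer. Completions that arrive out of order are parked
 * on pblk->compl_list and drained once the completion matching the current
 * sync position comes in.
 */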
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = &pblk->lines[pblk_ppa_to_line(*ppa)];
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		if (geo->version == NVM_OCSSD_SPEC_12) {
			map_ppa.ppa++;
			if (map_ppa.g.pg == geo->num_pg)
				done = 1;
		} else {
			map_ppa.m.sec++;
			if (map_ppa.m.sec == geo->clba)
				done = 1;
		}
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}
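
/* Prepare failed write buffer entries for resubmission: entries whose lba
 * has meanwhile been overwritten in the L2P table are invalidated, the
 * entries are marked as submittable again, and the line references taken
 * when they were first mapped are dropped, as they will be re-mapped.
 */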
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int pos, i;

	spin_lock(&pblk->trans_lock);
	pos = sentry;
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pos];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
		kref_put(&line->ref, pblk_line_put);

		pos = (pos + 1) & (rb->nr_entries - 1);
	}
	spin_unlock(&pblk->trans_lock);
}
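
/* Queue a copy of the completion context on the resubmit list so that the
 * write thread re-issues these write buffer entries on its next iteration.
 */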
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
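
/* Recovery work for a failed user data write: log the error, mark the
 * remaining sectors of the failed chunk as mapped and invalid, queue the
 * affected buffer entries for resubmission, then release the LUN semaphores
 * and the request resources.
 */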
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list;

	pblk_log_write_err(pblk, rqd);

	if (rqd->nr_ppas == 1)
		ppa_list = &rqd->ppa_addr;
	else
		ppa_list = rqd->ppa_list;

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}
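
/* Completion path for user data writes. Errored requests are deferred to
 * the recovery worker; successful ones complete in order against the write
 * buffer.
 */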
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
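
/* Completion path for end-of-line metadata (emeta) writes. Once all emeta
 * sectors of the line have synced, line close is scheduled on the close
 * workqueue.
 */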
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}
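
/* Common setup shared by data and metadata writes: program the write
 * opcode and flags, set the completion callback and allocate the DMA-able
 * metadata and PPA lists.
 */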
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}
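
/* Set up a user data write: allocate the per-request LUN bitmap and map
 * the buffer entries to physical addresses. While the next line still has
 * blocks left to erase, mapping may also return a block to erase through
 * erase_ppa.
 */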
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
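
/* Write out the next min_write_pgs worth of the line's emeta buffer. The
 * line is removed from the emeta list once the whole buffer has been
 * submitted.
 */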
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is not optimal, but
	 * moves the optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}
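
/* Select the oldest line with outstanding emeta and return it if a
 * metadata write can be scheduled alongside the data I/O being submitted;
 * otherwise return NULL.
 */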
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0])
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}
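
/* Submit a full write set: the data write for the current data line, an
 * asynchronous erase for the next data line if mapping reserved a block,
 * and a metadata write for a previous data line when one can be scheduled.
 */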
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pr_err("pblk: data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}
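
/* One pass of the write thread: re-issue previously failed writes first;
 * otherwise commit up to max_write_pgs sectors from the write buffer, build
 * the write bio and submit it. Returns 0 if work was done, 1 if the thread
 * should go to sleep.
 */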
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pr_err("pblk: bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}
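
/* Write thread: drains the write buffer until stopped, sleeping whenever
 * there is nothing to submit.
 */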
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}