// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"
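
/*
 * Complete a data write on the ring buffer side: end the user bios attached
 * to each valid entry, free any padding pages, release the request and
 * advance the buffer's sync pointer. Returns the new sync position.
 */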
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;

			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}
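
/* Complete a write that was parked on the completion list once it is next
 * in sync order.
 */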
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}
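
/*
 * Completions must reach the ring buffer in the order the requests were
 * submitted. Out-of-order completions are parked on pblk->compl_list and
 * drained once the in-order request completes.
 */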
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}
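
/*
 * Make the entries of a failed write submittable again: invalidate entries
 * whose lba has been overwritten in the meantime, restore the entry flags
 * and drop the line references taken when the entries were first mapped.
 */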
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}
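
/* Queue a failed write so that the write thread resubmits its sectors */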
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
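
/*
 * Recovery work for a failed data write: log the error, map out the rest of
 * the failing chunk, queue the sectors for resubmission and release the
 * original request.
 */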
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}
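
/* The write completion path cannot sleep; defer error recovery to the
 * close workqueue.
 */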
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}
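
/* Completion path for user data writes */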
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
	}

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
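
/*
 * Completion path for line metadata (emeta) writes. When the last emeta
 * sector of a line has synced, the line close work is scheduled.
 */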
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}
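
/*
 * Set up a data write request: allocate the LUN bitmap and request metadata,
 * then map the valid sectors, interleaving an erase for the next line if
 * blocks on it still need erasing.
 */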
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}
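
/* Calculate how many sectors to submit, sanity-checking the result on
 * debug builds.
 */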
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
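
/*
 * Write the next chunk of accumulated line metadata (emeta) for the given
 * line to the media.
 */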
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}
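
/*
 * Decide whether it is a good time to kick off a metadata write for
 * meta_line alongside this data request; the heuristic is described in the
 * comment below.
 */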
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is not optimal, but
	 * move the optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}
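
/*
 * Return the first line with outstanding emeta if its metadata can be
 * written alongside this data request, NULL otherwise.
 */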
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}
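
/*
 * Submit the I/O set derived from one buffered write: the data write for
 * the current line, optionally an erase for the next line and a metadata
 * write for a previous line.
 */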
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}
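
/* Free the pages that were added to pad the write request */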
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}
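
/*
 * Form and submit one write request from the ring buffer, giving priority
 * to previously failed writes on the resubmit list. Returns 0 if a request
 * was submitted, 1 if there is nothing to do or on error.
 */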
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}
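
/* Write thread: drive the write path until the kthread is stopped, sleeping
 * whenever there is no work to submit.
 */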
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}