pblk-map.c

/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */
#include "pblk.h"
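
/*
 * Map one write unit (min_write_pgs sectors) onto the current data line:
 * allocate the physical sectors, translate them to device ppas, and record
 * each valid sector's lba both in the request's per-sector metadata and in
 * the line's emeta lba list. Sectors beyond valid_secs are padding.
 */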
static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
                               struct ppa_addr *ppa_list,
                               unsigned long *lun_bitmap,
                               struct pblk_sec_meta *meta_list,
                               unsigned int valid_secs)
{
        struct pblk_line *line = pblk_line_get_data(pblk);
        struct pblk_emeta *emeta = line->emeta;
        struct pblk_w_ctx *w_ctx;
        __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
        u64 paddr;
        int nr_secs = pblk->min_write_pgs;
        int i;
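
        /* Reserve one full write unit on the line; paddr is its first sector */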
        paddr = pblk_alloc_page(pblk, line, nr_secs);

        for (i = 0; i < nr_secs; i++, paddr++) {
                /* ppa to be sent to the device */
                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                /* Write context for target bio completion on write buffer.
                 * Note that the write buffer is protected by the sync
                 * backpointer, and a single writer thread has access to each
                 * specific entry at a time. Thus, it is safe to modify the
                 * context for the entry we are setting up for submission
                 * without taking any lock or memory barrier.
                 */
                if (i < valid_secs) {
                        kref_get(&line->ref);
                        w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
                        w_ctx->ppa = ppa_list[i];
                        meta_list[i].lba = cpu_to_le64(w_ctx->lba);
                        lba_list[paddr] = cpu_to_le64(w_ctx->lba);
                        line->nr_valid_lbas++;
                } else {
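                        /* Padding sector: record an empty lba and invalidate
                         * the physical sector on the line right away.
                         */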
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        lba_list[paddr] = meta_list[i].lba = addr_empty;
                        __pblk_map_invalidate(pblk, line, paddr);
                }
        }
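
        /* The allocation may have filled the line: if so, set up the next
         * data line and close out metadata for the one just completed.
         */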
        if (pblk_line_is_full(line)) {
                struct pblk_line *prev_line = line;

                pblk_line_replace_data(pblk);
                pblk_line_close_meta(pblk, prev_line);
        }
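
        /* Take the write semaphore for the LUNs written by this mapping;
         * it is released when the write to the device completes.
         */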
        pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}
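
/*
 * Map a write request in write-unit-sized chunks, starting at sector offset
 * 'off' within the request. Only the first 'valid_secs' sectors carry data;
 * the tail of the last chunk is padded by pblk_map_page_data().
 */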
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                 unsigned long *lun_bitmap, unsigned int valid_secs,
                 unsigned int off)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i;

        for (i = off; i < rqd->nr_ppas; i += min) {
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
                                        lun_bitmap, &meta_list[i], map_secs);
        }
}
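
/*
 * As pblk_map_rq(), but additionally pick at most one block that still needs
 * erasing on the erase line and return it through 'erase_ppa', so the caller
 * can schedule the erase alongside this write.
 */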
/* only if erase_ppa is set, acquire erase semaphore */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                       unsigned int sentry, unsigned long *lun_bitmap,
                       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct pblk_line *e_line, *d_line;
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i, erase_lun;

        for (i = 0; i < rqd->nr_ppas; i += min) {
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
                                        lun_bitmap, &meta_list[i], map_secs);
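
                /* The erase candidate is the erase line's block on the LUN
                 * this chunk was just mapped to.
                 */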
                erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);

                /* line can change after page map. We might also be writing the
                 * last line.
                 */
                e_line = pblk_line_get_erase(pblk);
                if (!e_line)
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                                valid_secs, i + min);
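
                /* Claim the block for erase if nobody has queued it yet and
                 * hand it back to the caller via erase_ppa.
                 */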
                spin_lock(&e_line->lock);
                if (!test_bit(erase_lun, e_line->erase_bitmap)) {
                        set_bit(erase_lun, e_line->erase_bitmap);
                        atomic_dec(&e_line->left_eblks);

                        *erase_ppa = rqd->ppa_list[i];
                        erase_ppa->g.blk = e_line->id;

                        spin_unlock(&e_line->lock);

                        /* Avoid evaluating e_line->left_eblks */
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                                valid_secs, i + min);
                }

                spin_unlock(&e_line->lock);
        }
        d_line = pblk_line_get_data(pblk);

        /* line can change after page map. We might also be writing the
         * last line.
         */
        e_line = pblk_line_get_erase(pblk);
        if (!e_line)
                return;

        /* Erase blocks that are bad in this line but might not be in next */
        if (unlikely(ppa_empty(*erase_ppa)) &&
                        bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
                int bit = -1;

retry:
                bit = find_next_bit(d_line->blk_bitmap,
                                        lm->blk_per_line, bit + 1);
                if (bit >= lm->blk_per_line)
                        return;

                spin_lock(&e_line->lock);
                if (test_bit(bit, e_line->erase_bitmap)) {
                        spin_unlock(&e_line->lock);
                        goto retry;
                }
                spin_unlock(&e_line->lock);

                set_bit(bit, e_line->erase_bitmap);
                atomic_dec(&e_line->left_eblks);
                *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
                erase_ppa->g.blk = e_line->id;
        }
}