  1. /*
  2. * Copyright (C) 2015 IT University of Copenhagen
  3. * Initial release: Matias Bjorling <m@bjorling.me>
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License version
  7. * 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
  15. */
  16. #ifndef RRPC_H_
  17. #define RRPC_H_
  18. #include <linux/blkdev.h>
  19. #include <linux/blk-mq.h>
  20. #include <linux/bio.h>
  21. #include <linux/module.h>
  22. #include <linux/kthread.h>
  23. #include <linux/vmalloc.h>
  24. #include <linux/lightnvm.h>
  25. /* Run only GC if less than 1/X blocks are free */
  26. #define GC_LIMIT_INVERSE 10
  27. #define GC_TIME_SECS 100
  28. #define RRPC_SECTOR (512)
  29. #define RRPC_EXPOSED_PAGE_SIZE (4096)
  30. #define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
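
/* With the defaults above, NR_PHY_IN_LOG == 4096 / 512 == 8: eight
 * 512-byte device sectors back one exposed 4 KiB host page.
 */

/* List of requests currently in flight, used to serialize I/Os that
 * target overlapping logical address ranges.
 */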
struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	unsigned long flags;
};
struct rrpc_block {
	int id;				/* id inside of LUN */
	struct rrpc_lun *rlun;
	struct list_head prio;		/* LUN CG list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	int state;

	spinlock_t lock;
	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};
struct rrpc_lun {
	struct rrpc *rrpc;
	int id;
	struct ppa_addr bppa;

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	/* lun block lists */
	struct list_head used_list;	/* In-use blocks */
	struct list_head free_list;	/* Unused blocks, i.e. released
					 * and ready for use
					 */
	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
					 * free_list and used_list
					 */
	unsigned int nr_free_blocks;	/* Number of unused blocks */

	struct work_struct ws_gc;

	int reserved_blocks;

	spinlock_t lock;
};
struct rrpc {
	struct nvm_tgt_dev *dev;

	struct gendisk *disk;

	sector_t soffset; /* logical sector offset */

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a per-strategy
	 * structure.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
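
/* Per-block garbage collection work item: carries the block to be
 * collected to the GC workqueue.
 */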
struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};
/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
							  struct ppa_addr r)
{
	struct ppa_addr l;
	u32 secs, pgs;	/* u32 to match div_u64_rem()'s remainder argument */
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	return l;
}
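
/* Worked example (geometry values are hypothetical): with
 * geo->sec_per_pg == 4 and geo->pgs_per_blk == 256, a linear address
 * r.ppa == 1031 decomposes into l.g.sec == 1031 % 4 == 3 and
 * l.g.pg == (1031 / 4) % 256 == 1.
 */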
static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
	struct ppa_addr ppa;

	/* linear_to_generic_addr() is not declared in this header; wrap the
	 * raw physical block address and use the helper defined above.
	 */
	ppa.ppa = pba;
	return rrpc_linear_to_generic_addr(&dev->geo, ppa);
}
static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun = rblk->rlun;

	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
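
/* Example (hypothetical geometry): with geo->sec_per_lun == 262144 and
 * geo->sec_per_blk == 1024, block 5 of LUN 2 starts at linear sector
 * 2 * 262144 + 5 * 1024 == 529408.
 */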
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
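
/* Example: a bio starting at 512-byte sector 80 with bi_size == 16384
 * maps to laddr 80 / 8 == 10 and spans 16384 / 4096 == 4 logical pages;
 * rrpc_get_sector(10) converts back to sector 80.
 */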
static inline int request_intersects(struct rrpc_inflight_rq *r,
				     sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
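
/* Try to register the inclusive range [laddr, laddr + pages - 1] as in
 * flight. Returns 1 if an overlapping request is already in flight (the
 * caller should back off and retry later), 0 if the range was locked.
 */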
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}
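
/* Typical usage in the I/O path (illustrative sketch, not a verbatim
 * excerpt from rrpc.c): lock the bio's logical range before mapping it,
 * and unlock it again when the request completes.
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;	// range busy, retry the bio later
 *	// ... map and submit rqd ...
 *	rrpc_unlock_rq(rrpc, rqd);	// on completion
 */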
#endif /* RRPC_H_ */