/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
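
/* Mark the physical page backing @a invalid within its block and clear its
 * reverse (physical-to-logical) mapping. Called with rrpc->rev_lock held.
 */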
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
						unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}
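
/* Take the inflight lock for a logical address range. Returns the allocated
 * nvm_rq on success, NULL if the range is already locked by another request,
 * or an ERR_PTR if no request could be allocated.
 */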
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}
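
/* Handle a discard bio: once the inflight lock for the logical range is
 * obtained, invalidate every page the range covers.
 */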
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	while (1) {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		if (rqd)
			break;

		schedule();
	}

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	return (rblk->next_page == dev->geo.sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun = rblk->rlun;

	return rlun->id * dev->geo.sec_per_blk;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
					 struct rrpc_addr *gp)
{
	struct rrpc_block *rblk = gp->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	u64 addr = gp->addr;
	struct ppa_addr paddr;

	paddr.ppa = addr;
	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
	paddr.g.ch = rlun->bppa.g.ch;
	paddr.g.lun = rlun->bppa.g.lun;
	paddr.g.blk = rblk->id;

	return paddr;
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
						struct rrpc_block **cur_rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	if (*cur_rblk) {
		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);
	}
	*cur_rblk = new_rblk;
}

static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
							struct rrpc_lun *rlun)
{
	struct rrpc_block *rblk = NULL;

	if (list_empty(&rlun->free_list))
		goto out;

	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);

	list_move_tail(&rblk->list, &rlun->used_list);
	rblk->state = NVM_BLK_ST_TGT;
	rlun->nr_free_blocks--;

out:
	return rblk;
}
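
/* Take a block from the lun's free list. Blocks below the reserved watermark
 * are handed out only to GC requests (NVM_IOTYPE_GC).
 */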
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}

	rblk = __rrpc_get_blk(rrpc, rlun);
	if (!rblk) {
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}
	spin_unlock(&rlun->lock);

	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	if (rblk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&rblk->list, &rlun->free_list);
		rlun->nr_free_blocks++;
		rblk->state = NVM_BLK_ST_FREE;
	} else if (rblk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk:%d -> %u)\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id, rblk->state);
		list_move_tail(&rblk->list, &rlun->bb_list);
	}
	spin_unlock(&rlun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}
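
/* Schedule the per-lun GC work item on every lun in the target. */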
static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(struct timer_list *t)
{
	struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_status)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_status);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = dev->geo.sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_status) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_status)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct ppa_addr ppa;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;
	ppa.g.blk = rblk->id;

	if (nvm_erase_sync(rrpc->dev, &ppa, 1))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the beginning
 * of the list.
 */
static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblk, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblk, prio_list, prio)
		max = rblk_max_invalid(max, rblk);

	return max;
}
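
/* Per-lun GC worker: while the lun holds fewer free blocks than needed (a
 * 1/GC_LIMIT_INVERSE fraction of its blocks, but at least as many as there
 * are luns), pick the block with the most invalid pages from the prio list
 * and queue it for reclaim.
 */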
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > rlun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblk = block_prio_find_max(rlun);

		if (!rblk->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblk->prio);

		WARN_ON(!block_is_full(rrpc, rblk));

		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
				rlun->bppa.g.ch, rlun->bppa.g.lun,
				rblk->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblk;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
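
/* Update the forward (logical-to-physical) and reverse (physical-to-logical)
 * maps for @laddr, invalidating the previously mapped page if one exists.
 */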
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}
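
/* Hand out the next free page in the block, or ADDR_EMPTY if it is full. */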
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Map logical address to a physical page. The mapping implements a round robin
 * approach and allocates a page from the next lun available.
 *
 * Returns a ppa_addr for the mapped physical page. The ppa is set to
 * ADDR_EMPTY if no page could be allocated in the chosen rlun.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct rrpc_addr *p;
	struct ppa_addr ppa;
	u64 paddr;
	int gc_force = 0;

	ppa.ppa = ADDR_EMPTY;
	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
		return ppa;

	/*
	 * page allocation steps:
	 * 1. Try to allocate a new page from the current rblk.
	 * 2a. If that succeeds, proceed to map it in and return.
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not being able to retrieve a block and let
	 *    the caller handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
									prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block;
		 * therefore, make sure that one is used, instead of the
		 * one just added.
		 */
		goto new_blk;
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return ppa;
done:
	spin_unlock(&rlun->lock);
	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
	if (!p)
		return ppa;

	/* return global address */
	return rrpc_ppa_to_gaddr(tgt_dev, p);
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
{
	struct rrpc_lun *rlun = NULL;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
				rrpc->luns[i].bppa.g.lun == p.g.lun) {
			rlun = &rrpc->luns[i];
			break;
		}
	}

	return rlun;
}

static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	rlun = rrpc_ppa_to_lun(rrpc, ppa);
	rblk = &rlun->blocks[ppa.g.blk];
	rblk->state = NVM_BLK_ST_BAD;

	nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}
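
/* Walk the per-ppa completion bitmap of a failed write and mark each affected
 * block as grown bad, both in the target's block state and in the device's
 * bad block table.
 */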
static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct ppa_addr ppa, prev_ppa;
	int nr_ppas = rqd->nr_ppas;
	int bit;

	if (rqd->nr_ppas == 1)
		__rrpc_mark_bad_block(rrpc, rqd->ppa_addr);

	ppa_set_empty(&prev_ppa);
	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		ppa = rqd->ppa_list[bit];
		if (ppa_cmp_blk(ppa, prev_ppa))
			continue;

		__rrpc_mark_bad_block(rrpc, ppa);
	}
}
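
/* Account completed write sectors per block; once every sector of a block has
 * been written, the block is queued for GC consideration.
 */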
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = rqd->private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
	}

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}
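
/* Resolve each logical page of a multi-page read through the translation map
 * and fill rqd->ppa_list with the corresponding device addresses.
 */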
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct ppa_addr p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (p.ppa == ADDR_EMPTY) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = p;
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct ppa_addr p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (p.ppa == ADDR_EMPTY) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = p;
	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}
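
/* Set up the ppa mapping for a request: multi-page requests get a DMA-able
 * ppa list, single-page requests use rqd->ppa_addr directly.
 */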
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_op(bio) == REQ_OP_WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_op(bio) == REQ_OP_WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;
	int err;

	if (bio_size < dev->geo.sec_size)
		return NVM_IO_ERR;
	else if (bio_size > dev->geo.max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->private = rrpc;
	rqd->nr_ppas = nr_pages;
	rqd->end_io = rrpc_end_io;
	rrq->flags = flags;

	err = nvm_submit_io(dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
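
/* make_request entry point: bios from the block layer are wrapped in nvm_rq
 * requests and submitted, requeued or completed according to the return code
 * from rrpc_submit_io().
 */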
static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	blk_queue_split(q, &bio);

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	u64 i;

	for (i = 0; i < nlb; i++) {
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			pr_err("nvm: Maybe loaded an old target L2P\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		gaddr = rrpc_recov_addr(dev, pba);
		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
		if (!rlun) {
			pr_err("rrpc: l2p corruption on lba %llu\n",
							slba + i);
			return -EINVAL;
		}

		rblk = &rlun->blocks[gaddr.g.blk];
		if (!rblk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&rblk->list, &rlun->used_list);
			rblk->state = NVM_BLK_ST_TGT;
			rlun->nr_free_blocks--;
		}

		addr[i].addr = pba;
		addr[i].rblk = rblk;
		raddr[mod].addr = slba + i;
	}

	return 0;
}
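
/* Allocate the forward and reverse translation tables and seed them from the
 * device's L2P table.
 */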
static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	/* Bring up the mapping table from device */
	ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_block *rblk;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks;
	int i;
	int ret;

	if (!dev->parent->ops->get_bb_tbl)
		return 0;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret) {
		pr_err("rrpc: could not get BB table\n");
		goto out;
	}

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_FREE)
			continue;

		rblk = &rlun->blocks[i];
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
		rlun->nr_free_blocks--;
	}

out:
	kfree(blks);
	return ret;
}

static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
{
	rlun->bppa.ppa = 0;
	rlun->bppa.g.ch = ppa.g.ch;
	rlun->bppa.g.lun = ppa.g.lun;
}

static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		rlun->id = i;
		rrpc_set_lun_ppa(rlun, luns[i]);
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
							geo->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		INIT_LIST_HEAD(&rlun->free_list);
		INIT_LIST_HEAD(&rlun->used_list);
		INIT_LIST_HEAD(&rlun->bb_list);

		for (j = 0; j < geo->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];

			rblk->id = j;
			rblk->rlun = rlun;
			rblk->state = NVM_BLK_T_FREE;
			INIT_LIST_HEAD(&rblk->prio);
			INIT_LIST_HEAD(&rblk->list);
			spin_lock_init(&rblk->lock);

			list_add_tail(&rblk->list, &rlun->free_list);
		}

		rlun->rrpc = rrpc;
		rlun->nr_free_blocks = geo->blks_per_lun;
		rlun->reserved_blocks = 2; /* for GC only */

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		if (rrpc_bb_discovery(dev, rlun))
			goto err;
	}

	return 0;

err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t size = rrpc->nr_sects * dev->geo.sec_size;
	int ret;

	size >>= 9;

	ret = nvm_get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->geo.sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);

	nvm_put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address of each page in the block in the reverse trans
 * map and checks whether it is still valid by comparing the forward mapping
 * back against the physical address. Pages whose forward mapping no longer
 * points at this block are marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
							blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;
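
/* Target initialization: set up the area, luns, core pools, translation maps,
 * block state and GC before exposing the target's capacity.
 */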
static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = geo->nr_luns;
	rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, dev->luns);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");