/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->g_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		return;
	}

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

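/* Request descriptors come from one of two mempools: write requests use the
 * larger w_rq pool, while reads, erases and other generic commands share the
 * g_rq pool. The descriptor is zeroed before it is handed out.
 */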
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	if (rw == WRITE) {
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
	} else {
		pool = pblk->g_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
	mempool_t *pool;

	if (rw == WRITE)
		pool = pblk->w_rq_pool;
	else
		pool = pblk->g_rq_pool;

	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_pool, flags);
		if (!page)
			goto err;

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_pool);
			goto err;
		}
	}

	return 0;
err:
	/* i pages have been added to the bio at this point; free them all */
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

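/* Pick the GC list for a line based on its valid sector count (vsc): fully
 * invalidated lines go to the full list, partially valid lines to the
 * high/mid/low lists according to the line-meta thresholds, fully valid lines
 * to the empty list, and an out-of-range vsc marks the line as corrupt.
 */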
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif
	atomic_inc(&pblk->inflight_io);
	return nvm_submit_io(dev, rqd);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}
out:
	return bio;
}

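/* Pick the number of sectors for the next I/O: prefer a full sec_per_write
 * stripe, otherwise the largest multiple of the minimum write size that the
 * available sectors allow, and fall back to the minimum only when a flush
 * forces data out.
 */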
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);

			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

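/* Synchronously read or write the line's start-of-line metadata (smeta),
 * beginning at the given paddr. The buffer is taken from line->smeta and the
 * request is waited on with a timeout before the result is checked.
 */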
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

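/* Reserve one of the preallocated smeta/emeta buffers for the line. If all
 * PBLK_DATA_LINES buffers are in use, drop the free_lock and reschedule until
 * one becomes available.
 */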
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);
	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = cpu_to_le16(1);

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 1;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

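/* Allocate the map and invalid bitmaps for a free line and move it to the
 * OPEN state, seeding the per-line erase counters from the number of good
 * blocks in the line.
 */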
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;
	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* invalid_bitmap is special since it is used when the line is closed.
	 * No need to zero it out; it will be initialized using bb info from
	 * map_bitmap
	 */
	line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		return -EAGAIN;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		if (ret == -EAGAIN) {
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		} else {
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

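/* Set up the first data line: take a free line, attach metadata buffers,
 * erase it, and pre-allocate the next data line. If no next line is
 * available, incoming writes are limited to what can be stored safely.
 */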
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

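/* Swap in the pre-allocated next data line once the current one is full:
 * finish erasing it if needed, initialize its metadata and bad-block bitmaps,
 * and then pre-allocate a new next line for the following switch.
 */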
void pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new;
	unsigned int left_seblks;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		return;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	if (pblk->state != PBLK_STATE_RUNNING) {
		l_mg->data_line = NULL;
		l_mg->data_next = NULL;
		spin_unlock(&l_mg->free_lock);
		return;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				return;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return;

		goto retry_setup;
	}

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	if (line->map_bitmap)
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
	if (line->invalid_bitmap)
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
	struct pblk_line_meta *lm = &pblk->lm;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);

	pblk_gc_should_kick(pblk);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for exact vsc value; avoid a big line lock and take an
	 * approximation.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *),
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret) {
		switch (ret) {
		case -ETIME:
			pr_err("pblk: lun semaphore timed out\n");
			break;
		case -EINTR:
			pr_err("pblk: lun semaphore interrupted\n");
			break;
		}
	}
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}

	kfree(lun_bitmap);
}

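/* L2P table updates. pblk_update_map() is the generic path; the cache, GC
 * and device variants add checks so that newer mappings are not overwritten
 * by stale ones, all serialized on the translation lock.
 */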
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr l2p_ppa;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
		pblk_map_invalidate(pblk, l2p_ppa);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line)
{
	struct ppa_addr l2p_ppa;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	/* Prevent updated entries to be overwritten by GC */
	if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
				pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
			 struct ppa_addr entry_line)
{
	struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		pblk_map_invalidate(pblk, ppa);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_line = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (l2p_line.ppa != entry_line.ppa) {
		if (!pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++)
		ppas[i] = pblk_trans_map_get(pblk, blba + i);
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	sector_t lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba == ADDR_EMPTY) {
			ppas[i].ppa = ADDR_EMPTY;
		} else {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}