
  1. /*
  2. * Copyright (C) 2016 CNEX Labs
  3. * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  4. * Matias Bjorling <matias@cnexlabs.com>
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License version
  8. * 2 as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * pblk-core.c - pblk's core functionality
  16. *
  17. */
  18. #include "pblk.h"
  19. static void pblk_line_mark_bb(struct work_struct *work)
  20. {
  21. struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
  22. ws);
  23. struct pblk *pblk = line_ws->pblk;
  24. struct nvm_tgt_dev *dev = pblk->dev;
  25. struct ppa_addr *ppa = line_ws->priv;
  26. int ret;
  27. ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
  28. if (ret) {
  29. struct pblk_line *line;
  30. int pos;
  31. line = pblk_ppa_to_line(pblk, *ppa);
  32. pos = pblk_ppa_to_pos(&dev->geo, *ppa);
  33. pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
  34. line->id, pos);
  35. }
  36. kfree(ppa);
  37. mempool_free(line_ws, &pblk->gen_ws_pool);
  38. }
  39. static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
  40. struct ppa_addr ppa_addr)
  41. {
  42. struct nvm_tgt_dev *dev = pblk->dev;
  43. struct nvm_geo *geo = &dev->geo;
  44. struct ppa_addr *ppa;
  45. int pos = pblk_ppa_to_pos(geo, ppa_addr);
  46. pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
  47. atomic_long_inc(&pblk->erase_failed);
  48. atomic_dec(&line->blk_in_line);
  49. if (test_and_set_bit(pos, line->blk_bitmap))
  50. pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
  51. line->id, pos);
  52. /* Not necessary to mark bad blocks on 2.0 spec. */
  53. if (geo->version == NVM_OCSSD_SPEC_20)
  54. return;
  55. ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
  56. if (!ppa)
  57. return;
  58. *ppa = ppa_addr;
  59. pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
  60. GFP_ATOMIC, pblk->bb_wq);
  61. }
  62. static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
  63. {
  64. struct nvm_tgt_dev *dev = pblk->dev;
  65. struct nvm_geo *geo = &dev->geo;
  66. struct nvm_chk_meta *chunk;
  67. struct pblk_line *line;
  68. int pos;
  69. line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
  70. pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
  71. chunk = &line->chks[pos];
  72. atomic_dec(&line->left_seblks);
  73. if (rqd->error) {
  74. chunk->state = NVM_CHK_ST_OFFLINE;
  75. pblk_mark_bb(pblk, line, rqd->ppa_addr);
  76. } else {
  77. chunk->state = NVM_CHK_ST_FREE;
  78. }
  79. atomic_dec(&pblk->inflight_io);
  80. }
  81. /* Erase completion assumes that only one block is erased at a time */
  82. static void pblk_end_io_erase(struct nvm_rq *rqd)
  83. {
  84. struct pblk *pblk = rqd->private;
  85. __pblk_end_io_erase(pblk, rqd);
  86. mempool_free(rqd, &pblk->e_rq_pool);
  87. }
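/*
 * Note on the erase completion paths: __pblk_end_io_erase() updates the
 * chunk state (offline on error, free on success), accounts the erase in
 * left_seblks and inflight_io, and triggers bad-block handling on failure.
 * pblk_end_io_erase() is the asynchronous end_io callback and additionally
 * returns the request to the e_rq_pool; the synchronous erase path calls
 * __pblk_end_io_erase() directly on its on-stack request.
 */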
  88. /*
  89. * Get information for all chunks from the device.
  90. *
  91. * The caller is responsible for freeing the returned structure
  92. */
  93. struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
  94. {
  95. struct nvm_tgt_dev *dev = pblk->dev;
  96. struct nvm_geo *geo = &dev->geo;
  97. struct nvm_chk_meta *meta;
  98. struct ppa_addr ppa;
  99. unsigned long len;
  100. int ret;
  101. ppa.ppa = 0;
  102. len = geo->all_chunks * sizeof(*meta);
  103. meta = kzalloc(len, GFP_KERNEL);
  104. if (!meta)
  105. return ERR_PTR(-ENOMEM);
  106. ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
  107. if (ret) {
  108. kfree(meta);
  109. return ERR_PTR(-EIO);
  110. }
  111. return meta;
  112. }
  113. struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
  114. struct nvm_chk_meta *meta,
  115. struct ppa_addr ppa)
  116. {
  117. struct nvm_tgt_dev *dev = pblk->dev;
  118. struct nvm_geo *geo = &dev->geo;
  119. int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
  120. int lun_off = ppa.m.pu * geo->num_chk;
  121. int chk_off = ppa.m.chk;
  122. return meta + ch_off + lun_off + chk_off;
  123. }
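/*
 * The chunk metadata array returned by pblk_get_chunk_meta() is laid out
 * group-major: index = grp * (num_lun * num_chk) + pu * num_chk + chk.
 * As an illustrative example (values are hypothetical, not taken from any
 * particular device), with num_lun = 4 and num_chk = 100, the chunk at
 * (grp = 1, pu = 2, chk = 5) maps to entry 1 * 400 + 2 * 100 + 5 = 605.
 */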
  124. void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
  125. u64 paddr)
  126. {
  127. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  128. struct list_head *move_list = NULL;
  129. /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
  130. * table is modified with reclaimed sectors, a check is done to ensure
  131. * that newer updates are not overwritten.
  132. */
  133. spin_lock(&line->lock);
  134. WARN_ON(line->state == PBLK_LINESTATE_FREE);
  135. if (test_and_set_bit(paddr, line->invalid_bitmap)) {
  136. WARN_ONCE(1, "pblk: double invalidate\n");
  137. spin_unlock(&line->lock);
  138. return;
  139. }
  140. le32_add_cpu(line->vsc, -1);
  141. if (line->state == PBLK_LINESTATE_CLOSED)
  142. move_list = pblk_line_gc_list(pblk, line);
  143. spin_unlock(&line->lock);
  144. if (move_list) {
  145. spin_lock(&l_mg->gc_lock);
  146. spin_lock(&line->lock);
  147. /* Prevent moving a line that has just been chosen for GC */
  148. if (line->state == PBLK_LINESTATE_GC) {
  149. spin_unlock(&line->lock);
  150. spin_unlock(&l_mg->gc_lock);
  151. return;
  152. }
  153. spin_unlock(&line->lock);
  154. list_move_tail(&line->list, move_list);
  155. spin_unlock(&l_mg->gc_lock);
  156. }
  157. }
  158. void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
  159. {
  160. struct pblk_line *line;
  161. u64 paddr;
  162. #ifdef CONFIG_NVM_PBLK_DEBUG
  163. /* Callers must ensure that the ppa points to a device address */
  164. BUG_ON(pblk_addr_in_cache(ppa));
  165. BUG_ON(pblk_ppa_empty(ppa));
  166. #endif
  167. line = pblk_ppa_to_line(pblk, ppa);
  168. paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
  169. __pblk_map_invalidate(pblk, line, paddr);
  170. }
  171. static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
  172. unsigned int nr_secs)
  173. {
  174. sector_t lba;
  175. spin_lock(&pblk->trans_lock);
  176. for (lba = slba; lba < slba + nr_secs; lba++) {
  177. struct ppa_addr ppa;
  178. ppa = pblk_trans_map_get(pblk, lba);
  179. if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
  180. pblk_map_invalidate(pblk, ppa);
  181. pblk_ppa_set_empty(&ppa);
  182. pblk_trans_map_set(pblk, lba, ppa);
  183. }
  184. spin_unlock(&pblk->trans_lock);
  185. }
  186. /* Caller must guarantee that the request is a valid type */
  187. struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
  188. {
  189. mempool_t *pool;
  190. struct nvm_rq *rqd;
  191. int rq_size;
  192. switch (type) {
  193. case PBLK_WRITE:
  194. case PBLK_WRITE_INT:
  195. pool = &pblk->w_rq_pool;
  196. rq_size = pblk_w_rq_size;
  197. break;
  198. case PBLK_READ:
  199. pool = &pblk->r_rq_pool;
  200. rq_size = pblk_g_rq_size;
  201. break;
  202. default:
  203. pool = &pblk->e_rq_pool;
  204. rq_size = pblk_g_rq_size;
  205. }
  206. rqd = mempool_alloc(pool, GFP_KERNEL);
  207. memset(rqd, 0, rq_size);
  208. return rqd;
  209. }
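/*
 * Requests obtained here must be returned with pblk_free_rqd() using the
 * same type, so they go back to the pool they came from. mempool_alloc()
 * with GFP_KERNEL sleeps rather than failing in process context, which is
 * why its return value is not checked above.
 */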
  210. /* Typically used on completion path. Cannot guarantee request consistency */
  211. void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
  212. {
  213. struct nvm_tgt_dev *dev = pblk->dev;
  214. mempool_t *pool;
  215. switch (type) {
  216. case PBLK_WRITE:
  217. kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
  218. /* fall through */
  219. case PBLK_WRITE_INT:
  220. pool = &pblk->w_rq_pool;
  221. break;
  222. case PBLK_READ:
  223. pool = &pblk->r_rq_pool;
  224. break;
  225. case PBLK_ERASE:
  226. pool = &pblk->e_rq_pool;
  227. break;
  228. default:
  229. pblk_err(pblk, "trying to free unknown rqd type\n");
  230. return;
  231. }
  232. if (rqd->meta_list)
  233. nvm_dev_dma_free(dev->parent, rqd->meta_list,
  234. rqd->dma_meta_list);
  235. mempool_free(rqd, pool);
  236. }
  237. void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
  238. int nr_pages)
  239. {
  240. struct bio_vec bv;
  241. int i;
  242. WARN_ON(off + nr_pages != bio->bi_vcnt);
  243. for (i = off; i < nr_pages + off; i++) {
  244. bv = bio->bi_io_vec[i];
  245. mempool_free(bv.bv_page, &pblk->page_bio_pool);
  246. }
  247. }
  248. int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
  249. int nr_pages)
  250. {
  251. struct request_queue *q = pblk->dev->q;
  252. struct page *page;
  253. int i, ret;
  254. for (i = 0; i < nr_pages; i++) {
  255. page = mempool_alloc(&pblk->page_bio_pool, flags);
  256. ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
  257. if (ret != PBLK_EXPOSED_PAGE_SIZE) {
  258. pblk_err(pblk, "could not add page to bio\n");
  259. mempool_free(page, &pblk->page_bio_pool);
  260. goto err;
  261. }
  262. }
  263. return 0;
  264. err:
  265. pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
  266. return -1;
  267. }
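/*
 * On a partial failure, pblk_bio_add_pages() releases the pages it has
 * already attached (via pblk_bio_free_pages() on the last i vectors) and
 * returns -1, so callers only need to tear down the bio itself.
 */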
  268. void pblk_write_kick(struct pblk *pblk)
  269. {
  270. wake_up_process(pblk->writer_ts);
  271. mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
  272. }
  273. void pblk_write_timer_fn(struct timer_list *t)
  274. {
  275. struct pblk *pblk = from_timer(pblk, t, wtimer);
  276. /* kick the write thread every tick to flush outstanding data */
  277. pblk_write_kick(pblk);
  278. }
  279. void pblk_write_should_kick(struct pblk *pblk)
  280. {
  281. unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
  282. if (secs_avail >= pblk->min_write_pgs)
  283. pblk_write_kick(pblk);
  284. }
  285. static void pblk_wait_for_meta(struct pblk *pblk)
  286. {
  287. do {
  288. if (!atomic_read(&pblk->inflight_io))
  289. break;
  290. schedule();
  291. } while (1);
  292. }
  293. static void pblk_flush_writer(struct pblk *pblk)
  294. {
  295. pblk_rb_flush(&pblk->rwb);
  296. do {
  297. if (!pblk_rb_sync_count(&pblk->rwb))
  298. break;
  299. pblk_write_kick(pblk);
  300. schedule();
  301. } while (1);
  302. }
  303. struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
  304. {
  305. struct pblk_line_meta *lm = &pblk->lm;
  306. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  307. struct list_head *move_list = NULL;
  308. int vsc = le32_to_cpu(*line->vsc);
  309. lockdep_assert_held(&line->lock);
  310. if (line->w_err_gc->has_write_err) {
  311. if (line->gc_group != PBLK_LINEGC_WERR) {
  312. line->gc_group = PBLK_LINEGC_WERR;
  313. move_list = &l_mg->gc_werr_list;
  314. pblk_rl_werr_line_in(&pblk->rl);
  315. }
  316. } else if (!vsc) {
  317. if (line->gc_group != PBLK_LINEGC_FULL) {
  318. line->gc_group = PBLK_LINEGC_FULL;
  319. move_list = &l_mg->gc_full_list;
  320. }
  321. } else if (vsc < lm->high_thrs) {
  322. if (line->gc_group != PBLK_LINEGC_HIGH) {
  323. line->gc_group = PBLK_LINEGC_HIGH;
  324. move_list = &l_mg->gc_high_list;
  325. }
  326. } else if (vsc < lm->mid_thrs) {
  327. if (line->gc_group != PBLK_LINEGC_MID) {
  328. line->gc_group = PBLK_LINEGC_MID;
  329. move_list = &l_mg->gc_mid_list;
  330. }
  331. } else if (vsc < line->sec_in_line) {
  332. if (line->gc_group != PBLK_LINEGC_LOW) {
  333. line->gc_group = PBLK_LINEGC_LOW;
  334. move_list = &l_mg->gc_low_list;
  335. }
  336. } else if (vsc == line->sec_in_line) {
  337. if (line->gc_group != PBLK_LINEGC_EMPTY) {
  338. line->gc_group = PBLK_LINEGC_EMPTY;
  339. move_list = &l_mg->gc_empty_list;
  340. }
  341. } else {
  342. line->state = PBLK_LINESTATE_CORRUPT;
  343. line->gc_group = PBLK_LINEGC_NONE;
  344. move_list = &l_mg->corrupt_list;
  345. pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
  346. line->id, vsc,
  347. line->sec_in_line,
  348. lm->high_thrs, lm->mid_thrs);
  349. }
  350. return move_list;
  351. }
  352. void pblk_discard(struct pblk *pblk, struct bio *bio)
  353. {
  354. sector_t slba = pblk_get_lba(bio);
  355. sector_t nr_secs = pblk_get_secs(bio);
  356. pblk_invalidate_range(pblk, slba, nr_secs);
  357. }
  358. void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
  359. {
  360. atomic_long_inc(&pblk->write_failed);
  361. #ifdef CONFIG_NVM_PBLK_DEBUG
  362. pblk_print_failed_rqd(pblk, rqd, rqd->error);
  363. #endif
  364. }
  365. void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
  366. {
  367. /* Empty page read is not necessarily an error (e.g., L2P recovery) */
  368. if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
  369. atomic_long_inc(&pblk->read_empty);
  370. return;
  371. }
  372. switch (rqd->error) {
  373. case NVM_RSP_WARN_HIGHECC:
  374. atomic_long_inc(&pblk->read_high_ecc);
  375. break;
  376. case NVM_RSP_ERR_FAILECC:
  377. case NVM_RSP_ERR_FAILCRC:
  378. atomic_long_inc(&pblk->read_failed);
  379. break;
  380. default:
  381. pblk_err(pblk, "unknown read error:%d\n", rqd->error);
  382. }
  383. #ifdef CONFIG_NVM_PBLK_DEBUG
  384. pblk_print_failed_rqd(pblk, rqd, rqd->error);
  385. #endif
  386. }
  387. void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
  388. {
  389. pblk->sec_per_write = sec_per_write;
  390. }
  391. int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
  392. {
  393. struct nvm_tgt_dev *dev = pblk->dev;
  394. atomic_inc(&pblk->inflight_io);
  395. #ifdef CONFIG_NVM_PBLK_DEBUG
  396. if (pblk_check_io(pblk, rqd))
  397. return NVM_IO_ERR;
  398. #endif
  399. return nvm_submit_io(dev, rqd);
  400. }
  401. int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
  402. {
  403. struct nvm_tgt_dev *dev = pblk->dev;
  404. atomic_inc(&pblk->inflight_io);
  405. #ifdef CONFIG_NVM_PBLK_DEBUG
  406. if (pblk_check_io(pblk, rqd))
  407. return NVM_IO_ERR;
  408. #endif
  409. return nvm_submit_io_sync(dev, rqd);
  410. }
  411. static void pblk_bio_map_addr_endio(struct bio *bio)
  412. {
  413. bio_put(bio);
  414. }
  415. struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
  416. unsigned int nr_secs, unsigned int len,
  417. int alloc_type, gfp_t gfp_mask)
  418. {
  419. struct nvm_tgt_dev *dev = pblk->dev;
  420. void *kaddr = data;
  421. struct page *page;
  422. struct bio *bio;
  423. int i, ret;
  424. if (alloc_type == PBLK_KMALLOC_META)
  425. return bio_map_kern(dev->q, kaddr, len, gfp_mask);
  426. bio = bio_kmalloc(gfp_mask, nr_secs);
  427. if (!bio)
  428. return ERR_PTR(-ENOMEM);
  429. for (i = 0; i < nr_secs; i++) {
  430. page = vmalloc_to_page(kaddr);
  431. if (!page) {
  432. pblk_err(pblk, "could not map vmalloc bio\n");
  433. bio_put(bio);
  434. bio = ERR_PTR(-ENOMEM);
  435. goto out;
  436. }
  437. ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
  438. if (ret != PAGE_SIZE) {
  439. pblk_err(pblk, "could not add page to bio\n");
  440. bio_put(bio);
  441. bio = ERR_PTR(-ENOMEM);
  442. goto out;
  443. }
  444. kaddr += PAGE_SIZE;
  445. }
  446. bio->bi_end_io = pblk_bio_map_addr_endio;
  447. out:
  448. return bio;
  449. }
  450. int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
  451. unsigned long secs_to_flush)
  452. {
  453. int max = pblk->sec_per_write;
  454. int min = pblk->min_write_pgs;
  455. int secs_to_sync = 0;
  456. if (secs_avail >= max)
  457. secs_to_sync = max;
  458. else if (secs_avail >= min)
  459. secs_to_sync = min * (secs_avail / min);
  460. else if (secs_to_flush)
  461. secs_to_sync = min;
  462. return secs_to_sync;
  463. }
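/*
 * Worked example for pblk_calc_secs(), with illustrative values
 * min_write_pgs = 8 and sec_per_write = 64 (not taken from a real device):
 *	secs_avail = 100          -> 64 (capped at the max write size)
 *	secs_avail = 21           -> 16 (rounded down to a multiple of 8)
 *	secs_avail = 5, flush set -> 8  (pad up to the minimum write size)
 *	secs_avail = 5, no flush  -> 0  (wait for more data)
 */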
  464. void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  465. {
  466. u64 addr;
  467. int i;
  468. spin_lock(&line->lock);
  469. addr = find_next_zero_bit(line->map_bitmap,
  470. pblk->lm.sec_per_line, line->cur_sec);
  471. line->cur_sec = addr - nr_secs;
  472. for (i = 0; i < nr_secs; i++, line->cur_sec--)
  473. WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
  474. spin_unlock(&line->lock);
  475. }
  476. u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  477. {
  478. u64 addr;
  479. int i;
  480. lockdep_assert_held(&line->lock);
  481. /* logic error: ppa out-of-bounds. Prevent generating bad address */
  482. if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
  483. WARN(1, "pblk: page allocation out of bounds\n");
  484. nr_secs = pblk->lm.sec_per_line - line->cur_sec;
  485. }
  486. line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
  487. pblk->lm.sec_per_line, line->cur_sec);
  488. for (i = 0; i < nr_secs; i++, line->cur_sec++)
  489. WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
  490. return addr;
  491. }
  492. u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  493. {
  494. u64 addr;
  495. /* Lock needed in case a write fails and a recovery needs to remap
  496. * failed write buffer entries
  497. */
  498. spin_lock(&line->lock);
  499. addr = __pblk_alloc_page(pblk, line, nr_secs);
  500. line->left_msecs -= nr_secs;
  501. WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
  502. spin_unlock(&line->lock);
  503. return addr;
  504. }
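/*
 * Sector allocation summary: __pblk_alloc_page() advances cur_sec in the
 * map bitmap and expects line->lock to be held; pblk_alloc_page() is the
 * locked wrapper that also accounts the allocation in left_msecs; and
 * pblk_dealloc_page() walks cur_sec back when entries need to be remapped
 * after a failed write.
 */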
  505. u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
  506. {
  507. u64 paddr;
  508. spin_lock(&line->lock);
  509. paddr = find_next_zero_bit(line->map_bitmap,
  510. pblk->lm.sec_per_line, line->cur_sec);
  511. spin_unlock(&line->lock);
  512. return paddr;
  513. }
  514. /*
  515. * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
  516. * taking the per LUN semaphore.
  517. */
  518. static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
  519. void *emeta_buf, u64 paddr, int dir)
  520. {
  521. struct nvm_tgt_dev *dev = pblk->dev;
  522. struct nvm_geo *geo = &dev->geo;
  523. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  524. struct pblk_line_meta *lm = &pblk->lm;
  525. void *ppa_list, *meta_list;
  526. struct bio *bio;
  527. struct nvm_rq rqd;
  528. dma_addr_t dma_ppa_list, dma_meta_list;
  529. int min = pblk->min_write_pgs;
  530. int left_ppas = lm->emeta_sec[0];
  531. int id = line->id;
  532. int rq_ppas, rq_len;
  533. int cmd_op, bio_op;
  534. int i, j;
  535. int ret;
  536. if (dir == PBLK_WRITE) {
  537. bio_op = REQ_OP_WRITE;
  538. cmd_op = NVM_OP_PWRITE;
  539. } else if (dir == PBLK_READ) {
  540. bio_op = REQ_OP_READ;
  541. cmd_op = NVM_OP_PREAD;
  542. } else
  543. return -EINVAL;
  544. meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
  545. &dma_meta_list);
  546. if (!meta_list)
  547. return -ENOMEM;
  548. ppa_list = meta_list + pblk_dma_meta_size;
  549. dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
  550. next_rq:
  551. memset(&rqd, 0, sizeof(struct nvm_rq));
  552. rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
  553. rq_len = rq_ppas * geo->csecs;
  554. bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
  555. l_mg->emeta_alloc_type, GFP_KERNEL);
  556. if (IS_ERR(bio)) {
  557. ret = PTR_ERR(bio);
  558. goto free_rqd_dma;
  559. }
  560. bio->bi_iter.bi_sector = 0; /* internal bio */
  561. bio_set_op_attrs(bio, bio_op, 0);
  562. rqd.bio = bio;
  563. rqd.meta_list = meta_list;
  564. rqd.ppa_list = ppa_list;
  565. rqd.dma_meta_list = dma_meta_list;
  566. rqd.dma_ppa_list = dma_ppa_list;
  567. rqd.opcode = cmd_op;
  568. rqd.nr_ppas = rq_ppas;
  569. if (dir == PBLK_WRITE) {
  570. struct pblk_sec_meta *meta_list = rqd.meta_list;
  571. rqd.is_seq = 1;
  572. for (i = 0; i < rqd.nr_ppas; ) {
  573. spin_lock(&line->lock);
  574. paddr = __pblk_alloc_page(pblk, line, min);
  575. spin_unlock(&line->lock);
  576. for (j = 0; j < min; j++, i++, paddr++) {
  577. meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
  578. rqd.ppa_list[i] =
  579. addr_to_gen_ppa(pblk, paddr, id);
  580. }
  581. }
  582. } else {
  583. for (i = 0; i < rqd.nr_ppas; ) {
  584. struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
  585. int pos = pblk_ppa_to_pos(geo, ppa);
  586. if (pblk_io_aligned(pblk, rq_ppas))
  587. rqd.is_seq = 1;
  588. while (test_bit(pos, line->blk_bitmap)) {
  589. paddr += min;
  590. if (pblk_boundary_paddr_checks(pblk, paddr)) {
  591. pblk_err(pblk, "corrupt emeta line:%d\n",
  592. line->id);
  593. bio_put(bio);
  594. ret = -EINTR;
  595. goto free_rqd_dma;
  596. }
  597. ppa = addr_to_gen_ppa(pblk, paddr, id);
  598. pos = pblk_ppa_to_pos(geo, ppa);
  599. }
  600. if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
  601. pblk_err(pblk, "corrupt emeta line:%d\n",
  602. line->id);
  603. bio_put(bio);
  604. ret = -EINTR;
  605. goto free_rqd_dma;
  606. }
  607. for (j = 0; j < min; j++, i++, paddr++)
  608. rqd.ppa_list[i] =
  609. addr_to_gen_ppa(pblk, paddr, line->id);
  610. }
  611. }
  612. ret = pblk_submit_io_sync(pblk, &rqd);
  613. if (ret) {
  614. pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
  615. bio_put(bio);
  616. goto free_rqd_dma;
  617. }
  618. atomic_dec(&pblk->inflight_io);
  619. if (rqd.error) {
  620. if (dir == PBLK_WRITE)
  621. pblk_log_write_err(pblk, &rqd);
  622. else
  623. pblk_log_read_err(pblk, &rqd);
  624. }
  625. emeta_buf += rq_len;
  626. left_ppas -= rq_ppas;
  627. if (left_ppas)
  628. goto next_rq;
  629. free_rqd_dma:
  630. nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
  631. return ret;
  632. }
  633. u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
  634. {
  635. struct nvm_tgt_dev *dev = pblk->dev;
  636. struct nvm_geo *geo = &dev->geo;
  637. struct pblk_line_meta *lm = &pblk->lm;
  638. int bit;
  639. /* This usually only happens on bad lines */
  640. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  641. if (bit >= lm->blk_per_line)
  642. return -1;
  643. return bit * geo->ws_opt;
  644. }
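/*
 * smeta always starts at the first good block of the line; the returned
 * value is that block's position scaled by the optimal write size
 * (ws_opt), i.e. a sector offset within the line.
 */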
  645. static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
  646. u64 paddr, int dir)
  647. {
  648. struct nvm_tgt_dev *dev = pblk->dev;
  649. struct pblk_line_meta *lm = &pblk->lm;
  650. struct bio *bio;
  651. struct nvm_rq rqd;
  652. __le64 *lba_list = NULL;
  653. int i, ret;
  654. int cmd_op, bio_op;
  655. if (dir == PBLK_WRITE) {
  656. bio_op = REQ_OP_WRITE;
  657. cmd_op = NVM_OP_PWRITE;
  658. lba_list = emeta_to_lbas(pblk, line->emeta->buf);
  659. } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
  660. bio_op = REQ_OP_READ;
  661. cmd_op = NVM_OP_PREAD;
  662. } else
  663. return -EINVAL;
  664. memset(&rqd, 0, sizeof(struct nvm_rq));
  665. rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
  666. &rqd.dma_meta_list);
  667. if (!rqd.meta_list)
  668. return -ENOMEM;
  669. rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
  670. rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
  671. bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
  672. if (IS_ERR(bio)) {
  673. ret = PTR_ERR(bio);
  674. goto free_ppa_list;
  675. }
  676. bio->bi_iter.bi_sector = 0; /* internal bio */
  677. bio_set_op_attrs(bio, bio_op, 0);
  678. rqd.bio = bio;
  679. rqd.opcode = cmd_op;
  680. rqd.is_seq = 1;
  681. rqd.nr_ppas = lm->smeta_sec;
  682. for (i = 0; i < lm->smeta_sec; i++, paddr++) {
  683. struct pblk_sec_meta *meta_list = rqd.meta_list;
  684. rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
  685. if (dir == PBLK_WRITE) {
  686. __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
  687. meta_list[i].lba = lba_list[paddr] = addr_empty;
  688. }
  689. }
  690. /*
  691. * This I/O is sent by the write thread when a line is replaced. Since
  692. * the write thread is the only one sending write and erase commands,
  693. * there is no need to take the LUN semaphore.
  694. */
  695. ret = pblk_submit_io_sync(pblk, &rqd);
  696. if (ret) {
  697. pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
  698. bio_put(bio);
  699. goto free_ppa_list;
  700. }
  701. atomic_dec(&pblk->inflight_io);
  702. if (rqd.error) {
  703. if (dir == PBLK_WRITE) {
  704. pblk_log_write_err(pblk, &rqd);
  705. ret = 1;
  706. } else if (dir == PBLK_READ)
  707. pblk_log_read_err(pblk, &rqd);
  708. }
  709. free_ppa_list:
  710. nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
  711. return ret;
  712. }
  713. int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
  714. {
  715. u64 bpaddr = pblk_line_smeta_start(pblk, line);
  716. return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
  717. }
  718. int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
  719. void *emeta_buf)
  720. {
  721. return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
  722. line->emeta_ssec, PBLK_READ);
  723. }
  724. static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
  725. struct ppa_addr ppa)
  726. {
  727. rqd->opcode = NVM_OP_ERASE;
  728. rqd->ppa_addr = ppa;
  729. rqd->nr_ppas = 1;
  730. rqd->is_seq = 1;
  731. rqd->bio = NULL;
  732. }
  733. static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
  734. {
  735. struct nvm_rq rqd = {NULL};
  736. int ret;
  737. pblk_setup_e_rq(pblk, &rqd, ppa);
  738. /* The write thread schedules erases so that it minimizes disturbances
  739. * with writes. Thus, there is no need to take the LUN semaphore.
  740. */
  741. ret = pblk_submit_io_sync(pblk, &rqd);
  742. rqd.private = pblk;
  743. __pblk_end_io_erase(pblk, &rqd);
  744. return ret;
  745. }
  746. int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
  747. {
  748. struct pblk_line_meta *lm = &pblk->lm;
  749. struct ppa_addr ppa;
  750. int ret, bit = -1;
  751. /* Erase only good blocks, one at a time */
  752. do {
  753. spin_lock(&line->lock);
  754. bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
  755. bit + 1);
  756. if (bit >= lm->blk_per_line) {
  757. spin_unlock(&line->lock);
  758. break;
  759. }
  760. ppa = pblk->luns[bit].bppa; /* set ch and lun */
  761. ppa.a.blk = line->id;
  762. atomic_dec(&line->left_eblks);
  763. WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
  764. spin_unlock(&line->lock);
  765. ret = pblk_blk_erase_sync(pblk, ppa);
  766. if (ret) {
  767. pblk_err(pblk, "failed to erase line %d\n", line->id);
  768. return ret;
  769. }
  770. } while (1);
  771. return 0;
  772. }
  773. static void pblk_line_setup_metadata(struct pblk_line *line,
  774. struct pblk_line_mgmt *l_mg,
  775. struct pblk_line_meta *lm)
  776. {
  777. int meta_line;
  778. lockdep_assert_held(&l_mg->free_lock);
  779. retry_meta:
  780. meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
  781. if (meta_line == PBLK_DATA_LINES) {
  782. spin_unlock(&l_mg->free_lock);
  783. io_schedule();
  784. spin_lock(&l_mg->free_lock);
  785. goto retry_meta;
  786. }
  787. set_bit(meta_line, &l_mg->meta_bitmap);
  788. line->meta_line = meta_line;
  789. line->smeta = l_mg->sline_meta[meta_line];
  790. line->emeta = l_mg->eline_meta[meta_line];
  791. memset(line->smeta, 0, lm->smeta_len);
  792. memset(line->emeta->buf, 0, lm->emeta_len[0]);
  793. line->emeta->mem = 0;
  794. atomic_set(&line->emeta->sync, 0);
  795. }
  796. /* For now, lines are always assumed to be full lines. Thus, the smeta former
  797. * and current LUN bitmaps are omitted.
  798. */
  799. static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
  800. struct pblk_line *cur)
  801. {
  802. struct nvm_tgt_dev *dev = pblk->dev;
  803. struct nvm_geo *geo = &dev->geo;
  804. struct pblk_line_meta *lm = &pblk->lm;
  805. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  806. struct pblk_emeta *emeta = line->emeta;
  807. struct line_emeta *emeta_buf = emeta->buf;
  808. struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
  809. int nr_blk_line;
  810. /* After erasing the line, new bad blocks might appear and we risk
  811. * having an invalid line
  812. */
  813. nr_blk_line = lm->blk_per_line -
  814. bitmap_weight(line->blk_bitmap, lm->blk_per_line);
  815. if (nr_blk_line < lm->min_blk_line) {
  816. spin_lock(&l_mg->free_lock);
  817. spin_lock(&line->lock);
  818. line->state = PBLK_LINESTATE_BAD;
  819. spin_unlock(&line->lock);
  820. list_add_tail(&line->list, &l_mg->bad_list);
  821. spin_unlock(&l_mg->free_lock);
  822. pblk_debug(pblk, "line %d is bad\n", line->id);
  823. return 0;
  824. }
  825. /* Run-time metadata */
  826. line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
  827. /* Mark LUNs allocated in this line (all for now) */
  828. bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
  829. smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
  830. memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
  831. smeta_buf->header.id = cpu_to_le32(line->id);
  832. smeta_buf->header.type = cpu_to_le16(line->type);
  833. smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
  834. smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
  835. /* Start metadata */
  836. smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
  837. smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
  838. /* Fill metadata among lines */
  839. if (cur) {
  840. memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
  841. smeta_buf->prev_id = cpu_to_le32(cur->id);
  842. cur->emeta->buf->next_id = cpu_to_le32(line->id);
  843. } else {
  844. smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
  845. }
  846. /* All smeta must be set at this point */
  847. smeta_buf->header.crc = cpu_to_le32(
  848. pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
  849. smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
  850. /* End metadata */
  851. memcpy(&emeta_buf->header, &smeta_buf->header,
  852. sizeof(struct line_header));
  853. emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
  854. emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
  855. emeta_buf->header.crc = cpu_to_le32(
  856. pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
  857. emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
  858. emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
  859. emeta_buf->nr_valid_lbas = cpu_to_le64(0);
  860. emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
  861. emeta_buf->crc = cpu_to_le32(0);
  862. emeta_buf->prev_id = smeta_buf->prev_id;
  863. return 1;
  864. }
  865. static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
  866. {
  867. struct pblk_line_meta *lm = &pblk->lm;
  868. line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
  869. if (!line->map_bitmap)
  870. return -ENOMEM;
  871. /* will be initialized using bb info from map_bitmap */
  872. line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
  873. if (!line->invalid_bitmap) {
  874. kfree(line->map_bitmap);
  875. line->map_bitmap = NULL;
  876. return -ENOMEM;
  877. }
  878. return 0;
  879. }
  880. /* For now, lines are always assumed to be full lines. Thus, the smeta former
  881. * and current LUN bitmaps are omitted.
  882. */
  883. static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
  884. int init)
  885. {
  886. struct nvm_tgt_dev *dev = pblk->dev;
  887. struct nvm_geo *geo = &dev->geo;
  888. struct pblk_line_meta *lm = &pblk->lm;
  889. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  890. u64 off;
  891. int bit = -1;
  892. int emeta_secs;
  893. line->sec_in_line = lm->sec_per_line;
  894. /* Capture bad block information on line mapping bitmaps */
  895. while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
  896. bit + 1)) < lm->blk_per_line) {
  897. off = bit * geo->ws_opt;
  898. bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
  899. lm->sec_per_line);
  900. bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
  901. lm->sec_per_line);
  902. line->sec_in_line -= geo->clba;
  903. }
  904. /* Mark smeta metadata sectors as bad sectors */
  905. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  906. off = bit * geo->ws_opt;
  907. bitmap_set(line->map_bitmap, off, lm->smeta_sec);
  908. line->sec_in_line -= lm->smeta_sec;
  909. line->smeta_ssec = off;
  910. line->cur_sec = off + lm->smeta_sec;
  911. if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
  912. pblk_debug(pblk, "line smeta I/O failed. Retry\n");
  913. return 0;
  914. }
  915. bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
  916. /* Mark emeta metadata sectors as bad sectors. We need to consider bad
  917. * blocks to make sure that there are enough sectors to store emeta
  918. */
  919. emeta_secs = lm->emeta_sec[0];
  920. off = lm->sec_per_line;
  921. while (emeta_secs) {
  922. off -= geo->ws_opt;
  923. if (!test_bit(off, line->invalid_bitmap)) {
  924. bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
  925. emeta_secs -= geo->ws_opt;
  926. }
  927. }
  928. line->emeta_ssec = off;
  929. line->sec_in_line -= lm->emeta_sec[0];
  930. line->nr_valid_lbas = 0;
  931. line->left_msecs = line->sec_in_line;
  932. *line->vsc = cpu_to_le32(line->sec_in_line);
  933. if (lm->sec_per_line - line->sec_in_line !=
  934. bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
  935. spin_lock(&line->lock);
  936. line->state = PBLK_LINESTATE_BAD;
  937. spin_unlock(&line->lock);
  938. list_add_tail(&line->list, &l_mg->bad_list);
  939. pblk_err(pblk, "unexpected line %d is bad\n", line->id);
  940. return 0;
  941. }
  942. return 1;
  943. }
  944. static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
  945. {
  946. struct pblk_line_meta *lm = &pblk->lm;
  947. struct nvm_tgt_dev *dev = pblk->dev;
  948. struct nvm_geo *geo = &dev->geo;
  949. int blk_to_erase = atomic_read(&line->blk_in_line);
  950. int i;
  951. for (i = 0; i < lm->blk_per_line; i++) {
  952. struct pblk_lun *rlun = &pblk->luns[i];
  953. int pos = pblk_ppa_to_pos(geo, rlun->bppa);
  954. int state = line->chks[pos].state;
  955. /* Free chunks should not be erased */
  956. if (state & NVM_CHK_ST_FREE) {
  957. set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
  958. line->erase_bitmap);
  959. blk_to_erase--;
  960. }
  961. }
  962. return blk_to_erase;
  963. }
  964. static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
  965. {
  966. struct pblk_line_meta *lm = &pblk->lm;
  967. int blk_in_line = atomic_read(&line->blk_in_line);
  968. int blk_to_erase;
  969. /* Bad blocks do not need to be erased */
  970. bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
  971. spin_lock(&line->lock);
  972. /* If we have not written to this line, we need to mark up free chunks
  973. * as already erased
  974. */
  975. if (line->state == PBLK_LINESTATE_NEW) {
  976. blk_to_erase = pblk_prepare_new_line(pblk, line);
  977. line->state = PBLK_LINESTATE_FREE;
  978. } else {
  979. blk_to_erase = blk_in_line;
  980. }
  981. if (blk_in_line < lm->min_blk_line) {
  982. spin_unlock(&line->lock);
  983. return -EAGAIN;
  984. }
  985. if (line->state != PBLK_LINESTATE_FREE) {
  986. WARN(1, "pblk: corrupted line %d, state %d\n",
  987. line->id, line->state);
  988. spin_unlock(&line->lock);
  989. return -EINTR;
  990. }
  991. line->state = PBLK_LINESTATE_OPEN;
  992. atomic_set(&line->left_eblks, blk_to_erase);
  993. atomic_set(&line->left_seblks, blk_to_erase);
  994. line->meta_distance = lm->meta_distance;
  995. spin_unlock(&line->lock);
  996. kref_init(&line->ref);
  997. return 0;
  998. }
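/*
 * pblk_line_prepare() return values as interpreted by callers such as
 * pblk_line_get(): 0 on success (line moved to OPEN with the erase
 * counters set), -EAGAIN when the line has too few good blocks and should
 * be skipped, and -EINTR when the line state is corrupted.
 */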
  999. int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
  1000. {
  1001. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1002. int ret;
  1003. spin_lock(&l_mg->free_lock);
  1004. l_mg->data_line = line;
  1005. list_del(&line->list);
  1006. ret = pblk_line_prepare(pblk, line);
  1007. if (ret) {
  1008. list_add(&line->list, &l_mg->free_list);
  1009. spin_unlock(&l_mg->free_lock);
  1010. return ret;
  1011. }
  1012. spin_unlock(&l_mg->free_lock);
  1013. ret = pblk_line_alloc_bitmaps(pblk, line);
  1014. if (ret)
  1015. return ret;
  1016. if (!pblk_line_init_bb(pblk, line, 0)) {
  1017. list_add(&line->list, &l_mg->free_list);
  1018. return -EINTR;
  1019. }
  1020. pblk_rl_free_lines_dec(&pblk->rl, line, true);
  1021. return 0;
  1022. }
  1023. void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
  1024. {
  1025. kfree(line->map_bitmap);
  1026. line->map_bitmap = NULL;
  1027. line->smeta = NULL;
  1028. line->emeta = NULL;
  1029. }
  1030. static void pblk_line_reinit(struct pblk_line *line)
  1031. {
  1032. *line->vsc = cpu_to_le32(EMPTY_ENTRY);
  1033. line->map_bitmap = NULL;
  1034. line->invalid_bitmap = NULL;
  1035. line->smeta = NULL;
  1036. line->emeta = NULL;
  1037. }
  1038. void pblk_line_free(struct pblk_line *line)
  1039. {
  1040. kfree(line->map_bitmap);
  1041. kfree(line->invalid_bitmap);
  1042. pblk_line_reinit(line);
  1043. }
  1044. struct pblk_line *pblk_line_get(struct pblk *pblk)
  1045. {
  1046. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1047. struct pblk_line_meta *lm = &pblk->lm;
  1048. struct pblk_line *line;
  1049. int ret, bit;
  1050. lockdep_assert_held(&l_mg->free_lock);
  1051. retry:
  1052. if (list_empty(&l_mg->free_list)) {
  1053. pblk_err(pblk, "no free lines\n");
  1054. return NULL;
  1055. }
  1056. line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
  1057. list_del(&line->list);
  1058. l_mg->nr_free_lines--;
  1059. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  1060. if (unlikely(bit >= lm->blk_per_line)) {
  1061. spin_lock(&line->lock);
  1062. line->state = PBLK_LINESTATE_BAD;
  1063. spin_unlock(&line->lock);
  1064. list_add_tail(&line->list, &l_mg->bad_list);
  1065. pblk_debug(pblk, "line %d is bad\n", line->id);
  1066. goto retry;
  1067. }
  1068. ret = pblk_line_prepare(pblk, line);
  1069. if (ret) {
  1070. switch (ret) {
  1071. case -EAGAIN:
  1072. list_add(&line->list, &l_mg->bad_list);
  1073. goto retry;
  1074. case -EINTR:
  1075. list_add(&line->list, &l_mg->corrupt_list);
  1076. goto retry;
  1077. default:
  1078. pblk_err(pblk, "failed to prepare line %d\n", line->id);
  1079. list_add(&line->list, &l_mg->free_list);
  1080. l_mg->nr_free_lines++;
  1081. return NULL;
  1082. }
  1083. }
  1084. return line;
  1085. }
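/*
 * pblk_line_get() expects l_mg->free_lock to be held by the caller (see
 * the lockdep assertion above). The usual call pattern, as in
 * pblk_line_get_first_data() below, is:
 *
 *	spin_lock(&l_mg->free_lock);
 *	line = pblk_line_get(pblk);
 *	...
 *	spin_unlock(&l_mg->free_lock);
 */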
  1086. static struct pblk_line *pblk_line_retry(struct pblk *pblk,
  1087. struct pblk_line *line)
  1088. {
  1089. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1090. struct pblk_line *retry_line;
  1091. retry:
  1092. spin_lock(&l_mg->free_lock);
  1093. retry_line = pblk_line_get(pblk);
  1094. if (!retry_line) {
  1095. l_mg->data_line = NULL;
  1096. spin_unlock(&l_mg->free_lock);
  1097. return NULL;
  1098. }
  1099. retry_line->map_bitmap = line->map_bitmap;
  1100. retry_line->invalid_bitmap = line->invalid_bitmap;
  1101. retry_line->smeta = line->smeta;
  1102. retry_line->emeta = line->emeta;
  1103. retry_line->meta_line = line->meta_line;
  1104. pblk_line_reinit(line);
  1105. l_mg->data_line = retry_line;
  1106. spin_unlock(&l_mg->free_lock);
  1107. pblk_rl_free_lines_dec(&pblk->rl, line, false);
  1108. if (pblk_line_erase(pblk, retry_line))
  1109. goto retry;
  1110. return retry_line;
  1111. }
  1112. static void pblk_set_space_limit(struct pblk *pblk)
  1113. {
  1114. struct pblk_rl *rl = &pblk->rl;
  1115. atomic_set(&rl->rb_space, 0);
  1116. }
  1117. struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
  1118. {
  1119. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1120. struct pblk_line *line;
  1121. spin_lock(&l_mg->free_lock);
  1122. line = pblk_line_get(pblk);
  1123. if (!line) {
  1124. spin_unlock(&l_mg->free_lock);
  1125. return NULL;
  1126. }
  1127. line->seq_nr = l_mg->d_seq_nr++;
  1128. line->type = PBLK_LINETYPE_DATA;
  1129. l_mg->data_line = line;
  1130. pblk_line_setup_metadata(line, l_mg, &pblk->lm);
  1131. /* Allocate next line for preparation */
  1132. l_mg->data_next = pblk_line_get(pblk);
  1133. if (!l_mg->data_next) {
  1134. /* If we cannot get a new line, we need to stop the pipeline.
  1135. * Only allow as many writes in as we can store safely and then
  1136. * fail gracefully
  1137. */
  1138. pblk_set_space_limit(pblk);
  1139. l_mg->data_next = NULL;
  1140. } else {
  1141. l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
  1142. l_mg->data_next->type = PBLK_LINETYPE_DATA;
  1143. }
  1144. spin_unlock(&l_mg->free_lock);
  1145. if (pblk_line_alloc_bitmaps(pblk, line))
  1146. return NULL;
  1147. if (pblk_line_erase(pblk, line)) {
  1148. line = pblk_line_retry(pblk, line);
  1149. if (!line)
  1150. return NULL;
  1151. }
  1152. retry_setup:
  1153. if (!pblk_line_init_metadata(pblk, line, NULL)) {
  1154. line = pblk_line_retry(pblk, line);
  1155. if (!line)
  1156. return NULL;
  1157. goto retry_setup;
  1158. }
  1159. if (!pblk_line_init_bb(pblk, line, 1)) {
  1160. line = pblk_line_retry(pblk, line);
  1161. if (!line)
  1162. return NULL;
  1163. goto retry_setup;
  1164. }
  1165. pblk_rl_free_lines_dec(&pblk->rl, line, true);
  1166. return line;
  1167. }
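/*
 * First data line bring-up, as implemented above: take a free line plus a
 * spare one for preparation (data_next), set up its metadata slot, allocate
 * the map/invalid bitmaps, erase the line and initialize smeta/bad-block
 * accounting, retrying on another line via pblk_line_retry() if any step
 * fails. If no spare line is available, the rate limiter is clamped so
 * that only data that can be stored safely is admitted.
 */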
  1168. void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
  1169. {
  1170. struct pblk_line *line;
  1171. line = pblk_ppa_to_line(pblk, ppa);
  1172. kref_put(&line->ref, pblk_line_put_wq);
  1173. }
  1174. void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
  1175. {
  1176. struct ppa_addr *ppa_list;
  1177. int i;
  1178. ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
  1179. for (i = 0; i < rqd->nr_ppas; i++)
  1180. pblk_ppa_to_line_put(pblk, ppa_list[i]);
  1181. }
  1182. static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
  1183. {
  1184. lockdep_assert_held(&pblk->l_mg.free_lock);
  1185. pblk_set_space_limit(pblk);
  1186. pblk->state = PBLK_STATE_STOPPING;
  1187. }
  1188. static void pblk_line_close_meta_sync(struct pblk *pblk)
  1189. {
  1190. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1191. struct pblk_line_meta *lm = &pblk->lm;
  1192. struct pblk_line *line, *tline;
  1193. LIST_HEAD(list);
  1194. spin_lock(&l_mg->close_lock);
  1195. if (list_empty(&l_mg->emeta_list)) {
  1196. spin_unlock(&l_mg->close_lock);
  1197. return;
  1198. }
  1199. list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
  1200. spin_unlock(&l_mg->close_lock);
  1201. list_for_each_entry_safe(line, tline, &list, list) {
  1202. struct pblk_emeta *emeta = line->emeta;
  1203. while (emeta->mem < lm->emeta_len[0]) {
  1204. int ret;
  1205. ret = pblk_submit_meta_io(pblk, line);
  1206. if (ret) {
  1207. pblk_err(pblk, "sync meta line %d failed (%d)\n",
  1208. line->id, ret);
  1209. return;
  1210. }
  1211. }
  1212. }
  1213. pblk_wait_for_meta(pblk);
  1214. flush_workqueue(pblk->close_wq);
  1215. }
  1216. void __pblk_pipeline_flush(struct pblk *pblk)
  1217. {
  1218. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1219. int ret;
  1220. spin_lock(&l_mg->free_lock);
  1221. if (pblk->state == PBLK_STATE_RECOVERING ||
  1222. pblk->state == PBLK_STATE_STOPPED) {
  1223. spin_unlock(&l_mg->free_lock);
  1224. return;
  1225. }
  1226. pblk->state = PBLK_STATE_RECOVERING;
  1227. spin_unlock(&l_mg->free_lock);
  1228. pblk_flush_writer(pblk);
  1229. pblk_wait_for_meta(pblk);
  1230. ret = pblk_recov_pad(pblk);
  1231. if (ret) {
  1232. pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
  1233. return;
  1234. }
  1235. flush_workqueue(pblk->bb_wq);
  1236. pblk_line_close_meta_sync(pblk);
  1237. }
  1238. void __pblk_pipeline_stop(struct pblk *pblk)
  1239. {
  1240. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1241. spin_lock(&l_mg->free_lock);
  1242. pblk->state = PBLK_STATE_STOPPED;
  1243. l_mg->data_line = NULL;
  1244. l_mg->data_next = NULL;
  1245. spin_unlock(&l_mg->free_lock);
  1246. }
  1247. void pblk_pipeline_stop(struct pblk *pblk)
  1248. {
  1249. __pblk_pipeline_flush(pblk);
  1250. __pblk_pipeline_stop(pblk);
  1251. }
  1252. struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
  1253. {
  1254. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1255. struct pblk_line *cur, *new = NULL;
  1256. unsigned int left_seblks;
  1257. cur = l_mg->data_line;
  1258. new = l_mg->data_next;
  1259. if (!new)
  1260. goto out;
  1261. l_mg->data_line = new;
  1262. spin_lock(&l_mg->free_lock);
  1263. pblk_line_setup_metadata(new, l_mg, &pblk->lm);
  1264. spin_unlock(&l_mg->free_lock);
  1265. retry_erase:
  1266. left_seblks = atomic_read(&new->left_seblks);
  1267. if (left_seblks) {
  1268. /* If line is not fully erased, erase it */
  1269. if (atomic_read(&new->left_eblks)) {
  1270. if (pblk_line_erase(pblk, new))
  1271. goto out;
  1272. } else {
  1273. io_schedule();
  1274. }
  1275. goto retry_erase;
  1276. }
  1277. if (pblk_line_alloc_bitmaps(pblk, new))
  1278. return NULL;
  1279. retry_setup:
  1280. if (!pblk_line_init_metadata(pblk, new, cur)) {
  1281. new = pblk_line_retry(pblk, new);
  1282. if (!new)
  1283. goto out;
  1284. goto retry_setup;
  1285. }
  1286. if (!pblk_line_init_bb(pblk, new, 1)) {
  1287. new = pblk_line_retry(pblk, new);
  1288. if (!new)
  1289. goto out;
  1290. goto retry_setup;
  1291. }
  1292. pblk_rl_free_lines_dec(&pblk->rl, new, true);
  1293. /* Allocate next line for preparation */
  1294. spin_lock(&l_mg->free_lock);
  1295. l_mg->data_next = pblk_line_get(pblk);
  1296. if (!l_mg->data_next) {
  1297. /* If we cannot get a new line, we need to stop the pipeline.
  1298. * Only allow as many writes in as we can store safely and then
  1299. * fail gracefully
  1300. */
  1301. pblk_stop_writes(pblk, new);
  1302. l_mg->data_next = NULL;
  1303. } else {
  1304. l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
  1305. l_mg->data_next->type = PBLK_LINETYPE_DATA;
  1306. }
  1307. spin_unlock(&l_mg->free_lock);
  1308. out:
  1309. return new;
  1310. }
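/*
 * pblk_line_replace_data() switches writing to the previously prepared
 * data_next line: it waits for its outstanding erases to finish (erasing
 * it here if that has not happened yet), initializes its metadata and
 * bad-block bitmaps, and then reserves a new data_next. When no free line
 * is left, writes are stopped so the pipeline can fail gracefully.
 */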
  1311. static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
  1312. {
  1313. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1314. struct pblk_gc *gc = &pblk->gc;
  1315. spin_lock(&line->lock);
  1316. WARN_ON(line->state != PBLK_LINESTATE_GC);
  1317. line->state = PBLK_LINESTATE_FREE;
  1318. line->gc_group = PBLK_LINEGC_NONE;
  1319. pblk_line_free(line);
  1320. if (line->w_err_gc->has_write_err) {
  1321. pblk_rl_werr_line_out(&pblk->rl);
  1322. line->w_err_gc->has_write_err = 0;
  1323. }
  1324. spin_unlock(&line->lock);
  1325. atomic_dec(&gc->pipeline_gc);
  1326. spin_lock(&l_mg->free_lock);
  1327. list_add_tail(&line->list, &l_mg->free_list);
  1328. l_mg->nr_free_lines++;
  1329. spin_unlock(&l_mg->free_lock);
  1330. pblk_rl_free_lines_inc(&pblk->rl, line);
  1331. }
  1332. static void pblk_line_put_ws(struct work_struct *work)
  1333. {
  1334. struct pblk_line_ws *line_put_ws = container_of(work,
  1335. struct pblk_line_ws, ws);
  1336. struct pblk *pblk = line_put_ws->pblk;
  1337. struct pblk_line *line = line_put_ws->line;
  1338. __pblk_line_put(pblk, line);
  1339. mempool_free(line_put_ws, &pblk->gen_ws_pool);
  1340. }
  1341. void pblk_line_put(struct kref *ref)
  1342. {
  1343. struct pblk_line *line = container_of(ref, struct pblk_line, ref);
  1344. struct pblk *pblk = line->pblk;
  1345. __pblk_line_put(pblk, line);
  1346. }
  1347. void pblk_line_put_wq(struct kref *ref)
  1348. {
  1349. struct pblk_line *line = container_of(ref, struct pblk_line, ref);
  1350. struct pblk *pblk = line->pblk;
  1351. struct pblk_line_ws *line_put_ws;
  1352. line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
  1353. if (!line_put_ws)
  1354. return;
  1355. line_put_ws->pblk = pblk;
  1356. line_put_ws->line = line;
  1357. line_put_ws->priv = NULL;
  1358. INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
  1359. queue_work(pblk->r_end_wq, &line_put_ws->ws);
  1360. }
  1361. int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
  1362. {
  1363. struct nvm_rq *rqd;
  1364. int err;
  1365. rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
  1366. pblk_setup_e_rq(pblk, rqd, ppa);
  1367. rqd->end_io = pblk_end_io_erase;
  1368. rqd->private = pblk;
  1369. /* The write thread schedules erases so that it minimizes disturbances
  1370. * with writes. Thus, there is no need to take the LUN semaphore.
  1371. */
  1372. err = pblk_submit_io(pblk, rqd);
  1373. if (err) {
  1374. struct nvm_tgt_dev *dev = pblk->dev;
  1375. struct nvm_geo *geo = &dev->geo;
  1376. pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
  1377. pblk_ppa_to_line_id(ppa),
  1378. pblk_ppa_to_pos(geo, ppa));
  1379. }
  1380. return err;
  1381. }
  1382. struct pblk_line *pblk_line_get_data(struct pblk *pblk)
  1383. {
  1384. return pblk->l_mg.data_line;
  1385. }
  1386. /* For now, always erase next line */
  1387. struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
  1388. {
  1389. return pblk->l_mg.data_next;
  1390. }
  1391. int pblk_line_is_full(struct pblk_line *line)
  1392. {
  1393. return (line->left_msecs == 0);
  1394. }
  1395. static void pblk_line_should_sync_meta(struct pblk *pblk)
  1396. {
  1397. if (pblk_rl_is_limit(&pblk->rl))
  1398. pblk_line_close_meta_sync(pblk);
  1399. }
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

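/* Fill out the line's end metadata (valid sector counts, bad block bitmap,
 * write amplification counters, number of valid lbas and CRC) and queue the
 * line on the emeta list so that its metadata gets persisted.
 */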
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take approx. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

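/* Keep an in-memory copy of the line's lba list taken from emeta; it is
 * needed to GC a line that suffered write errors.
 */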
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
					 l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

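/* Generic helper: allocate a line workspace from the gen_ws_pool mempool
 * and queue @work on @wq.
 */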
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

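/* Take the write semaphore of the LUN that backs @ppa_list, waiting for up
 * to 30 seconds before complaining.
 */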
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_PBLK_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

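/* Per-request variant: LUNs already locked for this request are tracked in
 * @lun_bitmap so that each LUN semaphore is taken at most once.
 */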
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_PBLK_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

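/* Release the write semaphore of every LUN marked in @lun_bitmap. */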
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

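/* Set the L2P entry for @lba to @ppa, invalidating any previous device-side
 * mapping.
 */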
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

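/* Update the L2P table on behalf of GC. Returns 1 if the entry still pointed
 * to the sector being garbage collected and was updated to @ppa_new, or 0 if
 * the lba was rewritten in the meantime (or is out of bounds), in which case
 * the GC copy is stale and must not be mapped.
 */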
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

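/* Update the L2P table when a cached sector has been written to the device.
 * The entry is only updated if the cacheline has not been overwritten in the
 * meantime; otherwise the freshly mapped ppa is invalidated instead.
 */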
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

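/* Resolve a contiguous lba range through the L2P table, taking a line
 * reference for every entry that maps to a device address.
 */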
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

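/* Resolve an arbitrary list of lbas through the L2P table; ADDR_EMPTY and
 * out-of-bounds entries are skipped.
 */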
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}