/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
#include <linux/time.h>

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	up(&pblk->erase_sem);
	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->r_rq_pool);
}

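/*
 * Invalidation flow: a sector is marked invalid in the line's invalid_bitmap
 * and the valid sector count (vsc) is decremented under line->lock. If the
 * line is already closed it may need to migrate to a different GC list; that
 * move is done afterwards under l_mg->gc_lock (taken before line->lock), so
 * a line that has just been picked for GC is never moved.
 */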
static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
				  u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		return;
	}

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}

	line->vsc--;

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
			     u64 paddr)
{
	__pblk_map_invalidate(pblk, line, paddr);

	pblk_rb_sync_init(&pblk->rwb, NULL);
	line->left_ssecs--;
	if (!line->left_ssecs)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
	pblk_rb_sync_end(&pblk->rwb, NULL);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	if (rw == WRITE) {
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
	} else {
		pool = pblk->r_rq_pool;
		rq_size = pblk_r_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
	mempool_t *pool;

	if (rw == WRITE)
		pool = pblk->w_rq_pool;
	else
		pool = pblk->r_rq_pool;

	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_pool, flags);
		if (!page)
			goto err;

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_pool);
			goto err;
		}
	}

	return 0;
err:
	/* i pages have been added to the bio at this point; free them all */
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

void pblk_flush_writer(struct pblk *pblk)
{
	struct bio *bio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return;

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
	bio->bi_private = &wait;
	bio->bi_end_io = pblk_end_bio_sync;

	ret = pblk_write_to_cache(pblk, bio, 0);
	if (ret == NVM_IO_OK) {
		if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
			pr_err("pblk: flush cache timed out\n");
		}
	} else if (ret != NVM_IO_DONE) {
		pr_err("pblk: tear down bio failed\n");
	}

	if (bio->bi_error)
		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);

	bio_put(bio);
}

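/*
 * Pick the GC list a closed line belongs to, based on its valid sector count
 * (vsc): vsc == 0 -> gc_full_list, vsc < mid_thrs -> gc_high_list,
 * vsc < high_thrs -> gc_mid_list, vsc < sec_in_line -> gc_low_list, and
 * vsc == sec_in_line -> gc_empty_list; the fewer valid sectors a line has,
 * the higher its GC priority. Any other value means the counter is corrupt
 * and the line is quarantined on the corrupt list.
 */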
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	if (!line->vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (line->vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (line->vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (line->vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (line->vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, line->vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif
	return nvm_submit_io(dev, rqd);
}

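/*
 * Build a bio over an emeta buffer. When the buffer was kmalloc'ed the
 * generic bio_map_kern() helper is enough; when it was vmalloc'ed the buffer
 * is not physically contiguous, so each PAGE_SIZE chunk is resolved with
 * vmalloc_to_page() and added to the bio individually.
 */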
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}
out:
	return bio;
}

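/*
 * Round the number of sectors to submit to the device's write constraints.
 * Example (hypothetical geometry): with min_write_pgs = 4 and
 * max_write_pgs = 64, 23 available sectors yield 20 (largest multiple of the
 * minimum), 70 yield 64 (capped at the maximum), and 2 sectors yield 0
 * unless a flush is pending, in which case the minimum (4) is returned so
 * the request can be padded out.
 */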
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->max_write_pgs;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

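/*
 * Sector allocation within a line: map_bitmap tracks which sectors of the
 * line have been handed out. __pblk_alloc_page() scans from cur_sec for the
 * next unmapped sector and claims nr_secs consecutive bits; callers must
 * hold line->lock (pblk_alloc_page() takes it and also charges the
 * allocation against left_msecs).
 */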
static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
			     int nr_secs)
{
	u64 addr;
	int i;

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	struct ppa_addr *ppa_list;
	dma_addr_t dma_ppa_list;
	void *emeta = line->emeta;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec;
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int flags;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk);
	} else
		return -EINVAL;

	ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
	if (!ppa_list)
		return -ENOMEM;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = rq_ppas;
	rqd.ppa_list = ppa_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == WRITE) {
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	reinit_completion(&wait);

	bio_put(bio);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
	return ret;
}

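/*
 * smeta is stored at the beginning of the first good block of the line; its
 * start address is simply that block's position times the sectors in a plane
 * group (sec_per_pl). A line with no good block at all is unusable and -1 is
 * returned.
 */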
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
		lba_list = pblk_line_emeta_to_lbas(line->emeta);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_ppa_list);
	if (!rqd.ppa_list)
		return -ENOMEM;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
		if (dir == WRITE)
			lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
{
	return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return 0;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int bit = -1;

	/* Erase one block at a time and only erase good blocks */
	while ((bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		/* If the erase fails, the block is bad and should be marked */
		line->left_eblks--;
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));

		if (pblk_blk_erase_sync(pblk, ppa)) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return -ENOMEM;
		}
	}

	return 0;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
				  struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct line_smeta *smeta = line->smeta;
	struct line_emeta *emeta = line->emeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
	smeta->header.id = cpu_to_le32(line->id);
	smeta->header.type = cpu_to_le16(line->type);
	smeta->header.version = cpu_to_le16(1);

	/* Start metadata */
	smeta->seq_nr = cpu_to_le64(line->seq_nr);
	smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta->prev_id = cpu_to_le32(cur->id);
		cur->emeta->next_id = cpu_to_le32(line->id);
	} else {
		smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
	smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));

	/* End metadata */
	memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
	emeta->seq_nr = cpu_to_le64(line->seq_nr);
	emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta->nr_valid_lbas = cpu_to_le64(0);
	emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta->crc = cpu_to_le32(0);
	emeta->prev_id = smeta->prev_id;

	return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
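/*
 * Build the line's sector bitmaps: bad blocks recorded in blk_bitmap are
 * expanded into map_bitmap via the precomputed bb_template, smeta sectors
 * are reserved at the first good block, and emeta sectors are reserved at
 * the tail of the line, with one extra good plane group reserved in front of
 * the emeta region for each bad block at or past lm->emeta_bb. A final check
 * against invalid_bitmap marks the line bad if the accounting does not add
 * up.
 */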
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
retry_smeta:
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		off += geo->sec_per_pl;
		goto retry_smeta;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec;
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec;
	line->emeta_ssec = off;
	line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

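/*
 * Prepare a free line for writing: allocate its map and invalid bitmaps from
 * the line_meta_pool, move the line from FREE to OPEN under line->lock, and
 * seed the erase bitmap from the bad-block bitmap so bad blocks are never
 * erased. kref_init() starts the line's reference count; pblk_line_put()
 * returns the line to the free list once it drops to zero.
 */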
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;
	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* invalid_bitmap is special since it is used when line is closed. No
	 * need to zero it; it will be initialized using bb info from
	 * map_bitmap
	 */
	line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line state\n");
		return -EINTR;
	}
	line->state = PBLK_LINESTATE_OPEN;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
	line->left_eblks = line->blk_in_line;
	atomic_set(&line->left_seblks, line->left_eblks);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		return ret;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

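/*
 * Take the next line off the free list. The caller must hold
 * l_mg->free_lock (asserted below). Lines that turn out to have no good
 * blocks are parked on the bad list and the search continues; a line that
 * cannot be prepared is returned to the free list and NULL is returned.
 */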
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line = NULL;
	int bit;

	lockdep_assert_held(&l_mg->free_lock);

retry_get:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		goto out;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry_get;
	}

	if (pblk_line_prepare(pblk, line)) {
		pr_err("pblk: failed to prepare line %d\n", line->id);
		list_add(&line->list, &l_mg->free_list);
		return NULL;
	}

out:
	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;
	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, retry_line))
		return NULL;

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	l_mg->data_line = retry_line;

	return retry_line;
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int meta_line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	line->smeta = l_mg->sline_meta[meta_line].meta;
	line->emeta = l_mg->eline_meta[meta_line].meta;
	line->meta_line = meta_line;

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

	if (pblk_line_erase(pblk, line))
		return NULL;

retry_setup:
	if (!pblk_line_set_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}

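/*
 * Swap the current data line for the pre-allocated next one. The new line
 * must be fully erased before it can be used, so the caller waits (erasing
 * synchronously if erases are still pending, otherwise yielding with
 * io_schedule()) until all scheduled erases complete, grabs a free metadata
 * slot, and initializes smeta/emeta; if metadata setup or bitmap
 * initialization fails, pblk_line_retry() swaps in yet another free line.
 */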
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new;
	unsigned int left_seblks;
	int meta_line;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		return NULL;
	l_mg->data_line = new;

retry_line:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (new->left_eblks) {
			if (pblk_line_erase(pblk, new))
				return NULL;
		} else {
			io_schedule();
		}
		goto retry_line;
	}

	spin_lock(&l_mg->free_lock);
	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	new->smeta = l_mg->sline_meta[meta_line].meta;
	new->emeta = l_mg->eline_meta[meta_line].meta;
	new->meta_line = meta_line;

	memset(new->smeta, 0, lm->smeta_len);
	memset(new->emeta, 0, lm->emeta_len);
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_set_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	if (line->map_bitmap)
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
	if (line->invalid_bitmap)
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_r_rq_size);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

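/*
 * Close a fully written line: compute and write the emeta CRC, release the
 * line's metadata slot, and move the line from OPEN to CLOSED. The line is
 * placed on the GC list that matches its valid sector count, and its
 * map_bitmap is returned to the pool since only invalid_bitmap is needed
 * from here on.
 */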
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));

	if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
		pr_err("pblk: line %d close I/O failed\n", line->id);

	WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
				"pblk: corrupt closed line %d\n", line->id);

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *))
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(pblk->kw_wq, &line_ws->ws);
}

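/*
 * Per-LUN write serialization. The flat LUN index is derived from the ppa as
 * lun_id = ch * luns_per_chnl + lun; with a hypothetical geometry of 8 LUNs
 * per channel, ch=2/lun=3 maps to lun_id 19. All ppas in a request are
 * expected to target the same LUN, so only the first ppa is inspected.
 */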
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(lun_id, lun_bitmap))
		return;

	rlun = &pblk->luns[lun_id];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		switch (ret) {
		case -ETIME:
			pr_err("pblk: lun semaphore timed out\n");
			break;
		case -EINTR:
			pr_err("pblk: lun semaphore interrupted\n");
			break;
		}
	}
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}

	kfree(lun_bitmap);
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr l2p_ppa;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
		pblk_map_invalidate(pblk, l2p_ppa);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

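/*
 * L2P update on behalf of GC: the mapping is only installed if the entry
 * still points to the line being collected. If the sector was rewritten in
 * the meantime (entry now in cache, empty, or on another line), the GC copy
 * is stale and the update is dropped (return 0).
 */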
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line)
{
	struct ppa_addr l2p_ppa;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	/* Prevent updated entries from being overwritten by GC */
	if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
				pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);

	return ret;
}

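/*
 * L2P update when a cache entry is persisted to the device: the device ppa
 * is only installed if the L2P entry still points to the cacheline that was
 * written (entry_line); otherwise the sector was overwritten while it sat in
 * the write buffer and the freshly written device ppa is invalidated
 * instead. Padded entries (lba == ADDR_EMPTY) are invalidated and discarded.
 */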
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
			 struct ppa_addr entry_line)
{
	struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		pblk_map_invalidate(pblk, ppa);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_line = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (l2p_line.ppa != entry_line.ppa) {
		if (!pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++)
		ppas[i] = pblk_trans_map_get(pblk, blba + i);
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	sector_t lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba == ADDR_EMPTY) {
			ppas[i].ppa = ADDR_EMPTY;
		} else {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}