/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->e_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through - both write types use the write rqd pool */
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		return;
	}

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	/* Pages at indices 0..i-1 made it into the bio; free all of them */
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}
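
/*
 * Pick the GC list a closed line belongs to based on its valid sector count
 * (vsc): 0 -> gc_full_list, < high_thrs -> gc_high_list, < mid_thrs ->
 * gc_mid_list, < sec_in_line -> gc_low_list, == sec_in_line -> gc_empty_list.
 * Any other value marks the line as corrupt.
 */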
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}
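
/*
 * pblk_calc_secs() decides how many sectors a write should carry. For
 * example, with min_write_pgs = 4 and sec_per_write = 8: 10 available
 * sectors yield 8, 6 yield 4, and 2 yield 4 only if a flush is pending,
 * otherwise 0.
 */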
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = SMETA_VERSION;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 1;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
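
/*
 * Prepare a free line for use: allocate its map and invalid bitmaps,
 * transition it from FREE to OPEN and reset its erase accounting. Returns
 * -EAGAIN if the line is not in the FREE state.
 */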
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		kfree(line->map_bitmap);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		kfree(line->map_bitmap);
		kfree(line->invalid_bitmap);
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		return -EAGAIN;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		if (ret == -EAGAIN) {
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		} else {
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}
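
/*
 * Swap the pre-allocated data_next line in as the active data line: wait for
 * any outstanding erases on it, set up its metadata and bad-block bitmaps,
 * and then pre-allocate the following line (or stop accepting writes if no
 * free line is available).
 */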
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	if (pblk->state != PBLK_STATE_RUNNING) {
		l_mg->data_line = NULL;
		l_mg->data_next = NULL;
		spin_unlock(&l_mg->free_lock);
		goto out;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

out:
	return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
	struct pblk_line_meta *lm = &pblk->lm;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for an exact vsc value; avoid a big line lock and take
	 * an approximation instead.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->gen_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->gen_ws_pool);
}

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
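
/*
 * GC L2P update: only redirect the lba to its new cache entry if the L2P
 * still points to the sector being garbage collected; if the host has
 * rewritten the lba in the meantime, drop the update and return 0 so the
 * stale GC data is discarded.
 */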
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);

	return ret;
}

void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_dev_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}