/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->e_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through */
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		return;
	}

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	mempool_free(rqd, pool);
}
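
/*
 * Illustrative usage sketch (not part of the driver): pblk_alloc_rqd() and
 * pblk_free_rqd() are expected to be paired on the same request type, since
 * the type selects both the mempool and the request size. A hypothetical
 * caller would look roughly like this:
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 *
 *	// ... set up ppa_list/meta_list and submit ...
 *
 *	pblk_free_rqd(pblk, rqd, PBLK_READ);	// returns rqd to r_rq_pool
 */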

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	/* i pages were added before the failure; free all of them */
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
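
/*
 * Worked example (illustrative, with assumed thresholds): for a line with
 * sec_in_line = 1024 and hypothetical high_thrs = 256, mid_thrs = 512, the
 * valid-sector count (vsc) buckets a closed line as follows:
 *
 *	vsc == 0		-> gc_full_list  (no valid data; reclaim is free)
 *	0 < vsc < 256		-> gc_high_list  (cheap to GC, high priority)
 *	256 <= vsc < 512	-> gc_mid_list
 *	512 <= vsc < 1024	-> gc_low_list   (expensive to GC)
 *	vsc == 1024		-> gc_empty_list (fully valid; no GC benefit)
 *	vsc > 1024		-> corrupt_list  (accounting bug)
 */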

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	int ret;

	ret = pblk_check_io(pblk, rqd);
	if (ret)
		return ret;
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	int ret;

	ret = pblk_check_io(pblk, rqd);
	if (ret)
		return ret;
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
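
/*
 * Worked example (illustrative, with assumed geometry): for
 * min_write_pgs = 4 and sec_per_write = 16, pblk_calc_secs() yields:
 *
 *	secs_avail = 20, secs_to_flush = 0  -> 16 (capped at sec_per_write)
 *	secs_avail = 10, secs_to_flush = 0  -> 8  (rounded down to a multiple
 *						   of min_write_pgs)
 *	secs_avail = 3,  secs_to_flush = 0  -> 0  (wait for more data)
 *	secs_avail = 3,  secs_to_flush = 1  -> 4  (a flush forces a minimal,
 *						   padded write)
 */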

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
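
/*
 * Illustrative sketch (not part of the driver): map_bitmap acts as a
 * per-line sector allocator. Assuming cur_sec = 8 with the following
 * sectors free, a hypothetical caller mapping four sectors would see:
 *
 *	u64 paddr = pblk_alloc_page(pblk, line, 4);
 *	// paddr == 8; bits 8..11 now set in line->map_bitmap,
 *	// line->cur_sec == 12, line->left_msecs reduced by 4.
 *
 * pblk_dealloc_page() undoes exactly this, walking cur_sec backwards, which
 * is why it is only safe for the most recent allocation.
 */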

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = SMETA_VERSION;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_chk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 1;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
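
/*
 * Illustrative layout (assumed numbers, not part of the driver): after
 * pblk_line_init_bb() a line with no bad blocks looks roughly like this,
 * with smeta at the front and emeta reserved at the tail:
 *
 *	sector:  0 ... smeta_sec-1 | smeta_sec ... emeta_ssec-1 | ... end
 *	content: [     smeta     ] | [       user data        ] | [emeta]
 *
 * Bad blocks carve extra sectors out of the data region, and the emeta
 * start is pulled forward one plane-group at a time so the reserved area
 * still holds emeta_sec[0] usable sectors.
 */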

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		kfree(line->map_bitmap);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		kfree(line->map_bitmap);
		kfree(line->invalid_bitmap);
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		return -EAGAIN;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		if (ret == -EAGAIN) {
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		} else {
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	if (pblk->state != PBLK_STATE_RUNNING) {
		l_mg->data_line = NULL;
		l_mg->data_next = NULL;
		spin_unlock(&l_mg->free_lock);
		goto out;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
	struct pblk_line_meta *lm = &pblk->lm;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for exact vsc value; avoid a big line lock and take approx. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->gen_ws_pool);
}

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
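
/*
 * Illustrative usage sketch (not part of the driver): pblk_gen_run_ws() is
 * the generic way this file defers work to a workqueue. The grown bad-block
 * path at the top of the file uses it exactly like this:
 *
 *	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
 *						GFP_ATOMIC, pblk->bb_wq);
 *
 * The work function receives the pblk_line_ws via container_of() and is
 * responsible for freeing it back to gen_ws_pool, as pblk_line_mark_bb()
 * and pblk_line_close_ws() both do.
 */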

static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}
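
/*
 * Illustrative pairing (not part of the driver): the per-request LUN locking
 * helpers are expected to be used symmetrically, with lun_bitmap recording
 * which LUN semaphores the request already holds:
 *
 *	pblk_down_rq(pblk, ppa_list, nr_ppas, c_ctx->lun_bitmap);
 *	// ... submit the write ...
 *	pblk_up_rq(pblk, ppa_list, nr_ppas, c_ctx->lun_bitmap);
 *
 * (c_ctx->lun_bitmap stands in for a caller's per-request bitmap, like the
 * pblk_c_ctx lun_bitmap freed in pblk_free_rqd(); it ensures a LUN is never
 * downed twice for the same request.)
 */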

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}
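
/*
 * Illustrative scenario (assumed, for clarity): the three update paths above
 * cooperate to keep the L2P table race-free without a global lock.
 *
 *	1. A user write buffers lba 7 -> pblk_update_map_cache() points the
 *	   L2P entry at a write-buffer cacheline.
 *	2. The write thread later persists it -> pblk_update_map_dev() only
 *	   installs the device ppa if the entry still points at that same
 *	   cacheline; otherwise the freshly mapped ppa is invalidated.
 *	3. GC moving old data for lba 7 -> pblk_update_map_gc() only updates
 *	   the entry if it still points at the sector being collected.
 *
 * In every case the losing writer drops its update (invalidating its
 * now-stale device copy where one exists) instead of overwriting a newer
 * mapping.
 */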

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}