/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}
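
/* Account an erase failure: the block is marked bad in the line's block
 * bitmap right away; on 1.2 devices the device-side bad block table is
 * additionally updated from a workqueue, since this path runs from erase
 * completion context and cannot block (hence GFP_ATOMIC).
 */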
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		chunk->state = NVM_CHK_ST_FREE;
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}
/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure
 */
struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = kzalloc(len, GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
	if (ret) {
		kfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}
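
/* Index into the flat chunk-metadata table, which is laid out as
 * [group][parallel unit][chunk]. E.g. with num_lun = 4 and num_chk = 1000,
 * the chunk at grp 1, pu 2, chk 3 sits at offset 1 * 4000 + 2 * 1000 + 3.
 */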
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					struct nvm_chk_meta *meta,
					struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through */
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		return;
	}

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
	mempool_free(rqd, pool);
}
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, &pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}
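
/* Wake up the write thread and re-arm the flush timer. Called both when
 * enough data accumulates in the write buffer and from the periodic timer
 * below, so buffered data is flushed at least once a second.
 */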
void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}
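
/* Pick the GC list a closed line belongs to based on its valid sector
 * count (vsc): write-error lines take priority, then fully invalid lines,
 * then the high/mid/low buckets; vsc == sec_in_line means the line is
 * empty, and anything above that indicates a corrupted count.
 */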
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}
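
/* Decide how many sectors go into the next write. E.g. with
 * sec_per_write = 64 and min_write_pgs = 8: 100 available sectors are
 * capped at 64, 20 round down to 16, and 3 are only written (as a
 * padded minimal write) when a flush is pending.
 */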
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}
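
/* Read or write the start-of-line metadata (smeta) synchronously. smeta
 * occupies lm->smeta_sec sectors starting at the first good block of the
 * line, so paddr is expected to match pblk_line_smeta_start().
 */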
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE) {
			pblk_log_write_err(pblk, &rqd);
			ret = 1;
		} else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}
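
/* Assign one of the PBLK_DATA_LINES preallocated smeta/emeta buffer slots
 * to the line, sleeping (io_schedule) until a slot becomes available.
 */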
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
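
/* A line in state PBLK_LINESTATE_NEW has never been written; chunks the
 * device already reports as free need no erase, so they are pre-marked in
 * the erase bitmap and subtracted from the number of blocks to erase.
 */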
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase, ret;

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		ret = -ENOMEM;
		goto fail_free_map_bitmap;
	}

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		ret = -EAGAIN;
		goto fail_free_invalid_bitmap;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		ret = -EINTR;
		goto fail_free_invalid_bitmap;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);

	return 0;

fail_free_invalid_bitmap:
	spin_unlock(&line->lock);
	kfree(line->invalid_bitmap);
	line->invalid_bitmap = NULL;

fail_free_map_bitmap:
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;

	return ret;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}
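
/* Replace a line whose erase or metadata setup failed: take a fresh line
 * from the free list, inherit the meta buffers of the old one, and retry
 * until a usable line is obtained or the free list runs dry.
 */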
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}

void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}
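
/* Swap the active data line for the prepared next one, erasing it first if
 * needed, and kick off preparation of a new next line; returns NULL if the
 * pipeline has run out of lines.
 */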
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}

void pblk_line_free(struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}
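
/* As pblk_line_put(), but defers the put to a workqueue; for release paths
 * that run in atomic context (note the GFP_ATOMIC allocation).
 */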
void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}
  1387. static void pblk_line_should_sync_meta(struct pblk *pblk)
  1388. {
  1389. if (pblk_rl_is_limit(&pblk->rl))
  1390. pblk_line_close_meta_sync(pblk);
  1391. }
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;
        int i;

#ifdef CONFIG_NVM_DEBUG
        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt closed line %d\n", line->id);
#endif

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        kfree(line->map_bitmap);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                /* Record the closed state in the chunk metadata itself;
                 * assigning only to the local copy would be a dead store.
                 */
                if (!(state & NVM_CHK_ST_OFFLINE))
                        line->chks[pos].state = NVM_CHK_ST_CLOSED;
        }

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}
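
/*
 * Fill in the end-of-line metadata (emeta) for a line being closed:
 * valid sector counts, bad-block bitmap, write-amplification counters
 * and CRC. The line is then queued so that emeta can be persisted.
 */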
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

        /* No need for the exact vsc value; avoid a big line lock and take
         * an approximation.
         */
        memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
        memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

        wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
        wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
        wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

        emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
        emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

        spin_lock(&l_mg->close_lock);
        spin_lock(&line->lock);

        /* Update the in-memory start address for emeta, in case it has
         * shifted due to write errors.
         */
        if (line->emeta_ssec != line->cur_sec)
                line->emeta_ssec = line->cur_sec;

        list_add_tail(&line->list, &l_mg->emeta_list);
        spin_unlock(&line->lock);
        spin_unlock(&l_mg->close_lock);

        pblk_line_should_sync_meta(pblk);
}
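
/*
 * Keep an in-memory copy of the line's lba list, taken from emeta, so
 * that GC can still resolve mappings after a write error has made the
 * on-media emeta placement untrustworthy.
 */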
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        unsigned int lba_list_size = lm->emeta_len[2];
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
        struct pblk_emeta *emeta = line->emeta;

        w_err_gc->lba_list = pblk_malloc(lba_list_size,
                                l_mg->emeta_alloc_type, GFP_KERNEL);
        /* Do not memcpy into a NULL destination on allocation failure */
        if (!w_err_gc->lba_list)
                return;

        memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
                                lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

        /* Write errors make the emeta start address stored in smeta invalid,
         * so keep a copy of the lba list until we've gc'd the line.
         */
        if (w_err_gc->has_write_err)
                pblk_save_lba_list(pblk, line);

        pblk_line_close(pblk, line);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}
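
/*
 * Generic helper to queue per-line work: allocate a work context from
 * the shared mempool, fill it in and schedule @work on @wq. The context
 * is used without a NULL check, so callers must pass a blocking
 * @gfp_mask (mempool allocations only fail for non-blocking masks).
 */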
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                     void (*work)(struct work_struct *), gfp_t gfp_mask,
                     struct workqueue_struct *wq)
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(wq, &line_ws->ws);
}
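
/*
 * Per-LUN write semaphore helpers. pblk allows a single inflight write
 * per LUN, taken through rlun->wr_sem; the *_rq variants record owned
 * LUNs in a lun_bitmap so that a request takes each semaphore at most
 * once.
 */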
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
                                int nr_ppas, int pos)
{
        struct pblk_lun *rlun = &pblk->luns[pos];
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at page
         * granularity, all ppas in the I/O will map to the same LUN.
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
                                ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
        if (ret == -ETIME || ret == -EINTR)
                pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        /* If the LUN has been locked for this same request, do not attempt
         * to lock it again.
         */
        if (test_and_set_bit(pos, lun_bitmap))
                return;

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
                                ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

        rlun = &pblk->luns[pos];
        up(&rlun->wr_sem);
}
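
/*
 * Release the write semaphore of every LUN recorded in @lun_bitmap for
 * a completed request.
 */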
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int num_lun = geo->all_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }
}
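
/*
 * L2P (logical-to-physical) map updates. All accesses to the
 * translation map are serialized by trans_lock; whenever a device
 * mapping is replaced, the old physical address is invalidated so the
 * corresponding sectors can be garbage collected.
 */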
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr ppa_l2p;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
                pblk_map_invalidate(pblk, ppa_l2p);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}
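
/*
 * GC variant of the map update: only install the new cache address if
 * the L2P entry still points at the sector being garbage collected.
 * Returns 1 if the map was updated, 0 if a user write won the race.
 */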
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
                       struct pblk_line *gc_line, u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa_new));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

        if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
                spin_lock(&gc_line->lock);
                WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
                                                "pblk: corrupted GC update");
                spin_unlock(&gc_line->lock);

                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa_new);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}
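
/*
 * Called once a cached sector has made it to the device: move the L2P
 * entry from the cacheline to the mapped device address, unless the
 * cacheline was overwritten in the meantime, in which case the freshly
 * written sector is already stale and is invalidated instead.
 */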
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
                         struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
        struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
                atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                if (!pblk_ppa_empty(ppa_mapped))
                        pblk_map_invalidate(pblk, ppa_mapped);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated.
         */
        if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
                if (!pblk_ppa_empty(ppa_mapped))
                        pblk_map_invalidate(pblk, ppa_mapped);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

        pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
        spin_unlock(&pblk->trans_lock);
}
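
/*
 * Resolve a sequential lba range to ppas. A reference is taken on every
 * line hit by the lookup; the caller is responsible for dropping it
 * once the read completes.
 */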
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr ppa;

                ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

                /* If the L2P entry maps to a line, the reference is valid */
                if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
                        int line_id = pblk_ppa_to_line(ppa);
                        struct pblk_line *line = &pblk->lines[line_id];

                        kref_get(&line->ref);
                }
        }
        spin_unlock(&pblk->trans_lock);
}
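
/*
 * Resolve an arbitrary list of lbas to ppas: ADDR_EMPTY entries are
 * skipped and out-of-bounds lbas are flagged and ignored.
 */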
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        u64 lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba != ADDR_EMPTY) {
                        /* logic error: lba out-of-bounds. Ignore entry */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}