pblk-core.c 50 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091
  1. /*
  2. * Copyright (C) 2016 CNEX Labs
  3. * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  4. * Matias Bjorling <matias@cnexlabs.com>
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License version
  8. * 2 as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * pblk-core.c - pblk's core functionality
  16. *
  17. */
  18. #include "pblk.h"
  19. static void pblk_line_mark_bb(struct work_struct *work)
  20. {
  21. struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
  22. ws);
  23. struct pblk *pblk = line_ws->pblk;
  24. struct nvm_tgt_dev *dev = pblk->dev;
  25. struct ppa_addr *ppa = line_ws->priv;
  26. int ret;
  27. ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
  28. if (ret) {
  29. struct pblk_line *line;
  30. int pos;
  31. line = &pblk->lines[pblk_ppa_to_line(*ppa)];
  32. pos = pblk_ppa_to_pos(&dev->geo, *ppa);
  33. pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
  34. line->id, pos);
  35. }
  36. kfree(ppa);
  37. mempool_free(line_ws, &pblk->gen_ws_pool);
  38. }
  39. static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
  40. struct ppa_addr ppa_addr)
  41. {
  42. struct nvm_tgt_dev *dev = pblk->dev;
  43. struct nvm_geo *geo = &dev->geo;
  44. struct ppa_addr *ppa;
  45. int pos = pblk_ppa_to_pos(geo, ppa_addr);
  46. pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
  47. atomic_long_inc(&pblk->erase_failed);
  48. atomic_dec(&line->blk_in_line);
  49. if (test_and_set_bit(pos, line->blk_bitmap))
  50. pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
  51. line->id, pos);
  52. /* Not necessary to mark bad blocks on 2.0 spec. */
  53. if (geo->version == NVM_OCSSD_SPEC_20)
  54. return;
  55. ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
  56. if (!ppa)
  57. return;
  58. *ppa = ppa_addr;
  59. pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
  60. GFP_ATOMIC, pblk->bb_wq);
  61. }
  62. static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
  63. {
  64. struct nvm_tgt_dev *dev = pblk->dev;
  65. struct nvm_geo *geo = &dev->geo;
  66. struct nvm_chk_meta *chunk;
  67. struct pblk_line *line;
  68. int pos;
  69. line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
  70. pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
  71. chunk = &line->chks[pos];
  72. atomic_dec(&line->left_seblks);
  73. if (rqd->error) {
  74. chunk->state = NVM_CHK_ST_OFFLINE;
  75. pblk_mark_bb(pblk, line, rqd->ppa_addr);
  76. } else {
  77. chunk->state = NVM_CHK_ST_FREE;
  78. }
  79. atomic_dec(&pblk->inflight_io);
  80. }
  81. /* Erase completion assumes that only one block is erased at the time */
  82. static void pblk_end_io_erase(struct nvm_rq *rqd)
  83. {
  84. struct pblk *pblk = rqd->private;
  85. __pblk_end_io_erase(pblk, rqd);
  86. mempool_free(rqd, &pblk->e_rq_pool);
  87. }
  88. /*
  89. * Get information for all chunks from the device.
  90. *
  91. * The caller is responsible for freeing the returned structure
  92. */
  93. struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
  94. {
  95. struct nvm_tgt_dev *dev = pblk->dev;
  96. struct nvm_geo *geo = &dev->geo;
  97. struct nvm_chk_meta *meta;
  98. struct ppa_addr ppa;
  99. unsigned long len;
  100. int ret;
  101. ppa.ppa = 0;
  102. len = geo->all_chunks * sizeof(*meta);
  103. meta = kzalloc(len, GFP_KERNEL);
  104. if (!meta)
  105. return ERR_PTR(-ENOMEM);
  106. ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
  107. if (ret) {
  108. kfree(meta);
  109. return ERR_PTR(-EIO);
  110. }
  111. return meta;
  112. }
  113. struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
  114. struct nvm_chk_meta *meta,
  115. struct ppa_addr ppa)
  116. {
  117. struct nvm_tgt_dev *dev = pblk->dev;
  118. struct nvm_geo *geo = &dev->geo;
  119. int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
  120. int lun_off = ppa.m.pu * geo->num_chk;
  121. int chk_off = ppa.m.chk;
  122. return meta + ch_off + lun_off + chk_off;
  123. }
  124. void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
  125. u64 paddr)
  126. {
  127. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  128. struct list_head *move_list = NULL;
  129. /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
  130. * table is modified with reclaimed sectors, a check is done to endure
  131. * that newer updates are not overwritten.
  132. */
  133. spin_lock(&line->lock);
  134. WARN_ON(line->state == PBLK_LINESTATE_FREE);
  135. if (test_and_set_bit(paddr, line->invalid_bitmap)) {
  136. WARN_ONCE(1, "pblk: double invalidate\n");
  137. spin_unlock(&line->lock);
  138. return;
  139. }
  140. le32_add_cpu(line->vsc, -1);
  141. if (line->state == PBLK_LINESTATE_CLOSED)
  142. move_list = pblk_line_gc_list(pblk, line);
  143. spin_unlock(&line->lock);
  144. if (move_list) {
  145. spin_lock(&l_mg->gc_lock);
  146. spin_lock(&line->lock);
  147. /* Prevent moving a line that has just been chosen for GC */
  148. if (line->state == PBLK_LINESTATE_GC) {
  149. spin_unlock(&line->lock);
  150. spin_unlock(&l_mg->gc_lock);
  151. return;
  152. }
  153. spin_unlock(&line->lock);
  154. list_move_tail(&line->list, move_list);
  155. spin_unlock(&l_mg->gc_lock);
  156. }
  157. }
  158. void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
  159. {
  160. struct pblk_line *line;
  161. u64 paddr;
  162. int line_id;
  163. #ifdef CONFIG_NVM_PBLK_DEBUG
  164. /* Callers must ensure that the ppa points to a device address */
  165. BUG_ON(pblk_addr_in_cache(ppa));
  166. BUG_ON(pblk_ppa_empty(ppa));
  167. #endif
  168. line_id = pblk_ppa_to_line(ppa);
  169. line = &pblk->lines[line_id];
  170. paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
  171. __pblk_map_invalidate(pblk, line, paddr);
  172. }
  173. static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
  174. unsigned int nr_secs)
  175. {
  176. sector_t lba;
  177. spin_lock(&pblk->trans_lock);
  178. for (lba = slba; lba < slba + nr_secs; lba++) {
  179. struct ppa_addr ppa;
  180. ppa = pblk_trans_map_get(pblk, lba);
  181. if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
  182. pblk_map_invalidate(pblk, ppa);
  183. pblk_ppa_set_empty(&ppa);
  184. pblk_trans_map_set(pblk, lba, ppa);
  185. }
  186. spin_unlock(&pblk->trans_lock);
  187. }
  188. /* Caller must guarantee that the request is a valid type */
  189. struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
  190. {
  191. mempool_t *pool;
  192. struct nvm_rq *rqd;
  193. int rq_size;
  194. switch (type) {
  195. case PBLK_WRITE:
  196. case PBLK_WRITE_INT:
  197. pool = &pblk->w_rq_pool;
  198. rq_size = pblk_w_rq_size;
  199. break;
  200. case PBLK_READ:
  201. pool = &pblk->r_rq_pool;
  202. rq_size = pblk_g_rq_size;
  203. break;
  204. default:
  205. pool = &pblk->e_rq_pool;
  206. rq_size = pblk_g_rq_size;
  207. }
  208. rqd = mempool_alloc(pool, GFP_KERNEL);
  209. memset(rqd, 0, rq_size);
  210. return rqd;
  211. }
  212. /* Typically used on completion path. Cannot guarantee request consistency */
  213. void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
  214. {
  215. struct nvm_tgt_dev *dev = pblk->dev;
  216. mempool_t *pool;
  217. switch (type) {
  218. case PBLK_WRITE:
  219. kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
  220. /* fall through */
  221. case PBLK_WRITE_INT:
  222. pool = &pblk->w_rq_pool;
  223. break;
  224. case PBLK_READ:
  225. pool = &pblk->r_rq_pool;
  226. break;
  227. case PBLK_ERASE:
  228. pool = &pblk->e_rq_pool;
  229. break;
  230. default:
  231. pblk_err(pblk, "trying to free unknown rqd type\n");
  232. return;
  233. }
  234. if (rqd->meta_list)
  235. nvm_dev_dma_free(dev->parent, rqd->meta_list,
  236. rqd->dma_meta_list);
  237. mempool_free(rqd, pool);
  238. }
  239. void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
  240. int nr_pages)
  241. {
  242. struct bio_vec bv;
  243. int i;
  244. WARN_ON(off + nr_pages != bio->bi_vcnt);
  245. for (i = off; i < nr_pages + off; i++) {
  246. bv = bio->bi_io_vec[i];
  247. mempool_free(bv.bv_page, &pblk->page_bio_pool);
  248. }
  249. }
  250. int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
  251. int nr_pages)
  252. {
  253. struct request_queue *q = pblk->dev->q;
  254. struct page *page;
  255. int i, ret;
  256. for (i = 0; i < nr_pages; i++) {
  257. page = mempool_alloc(&pblk->page_bio_pool, flags);
  258. ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
  259. if (ret != PBLK_EXPOSED_PAGE_SIZE) {
  260. pblk_err(pblk, "could not add page to bio\n");
  261. mempool_free(page, &pblk->page_bio_pool);
  262. goto err;
  263. }
  264. }
  265. return 0;
  266. err:
  267. pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
  268. return -1;
  269. }
  270. void pblk_write_kick(struct pblk *pblk)
  271. {
  272. wake_up_process(pblk->writer_ts);
  273. mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
  274. }
  275. void pblk_write_timer_fn(struct timer_list *t)
  276. {
  277. struct pblk *pblk = from_timer(pblk, t, wtimer);
  278. /* kick the write thread every tick to flush outstanding data */
  279. pblk_write_kick(pblk);
  280. }
  281. void pblk_write_should_kick(struct pblk *pblk)
  282. {
  283. unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
  284. if (secs_avail >= pblk->min_write_pgs)
  285. pblk_write_kick(pblk);
  286. }
  287. static void pblk_wait_for_meta(struct pblk *pblk)
  288. {
  289. do {
  290. if (!atomic_read(&pblk->inflight_io))
  291. break;
  292. schedule();
  293. } while (1);
  294. }
  295. static void pblk_flush_writer(struct pblk *pblk)
  296. {
  297. pblk_rb_flush(&pblk->rwb);
  298. do {
  299. if (!pblk_rb_sync_count(&pblk->rwb))
  300. break;
  301. pblk_write_kick(pblk);
  302. schedule();
  303. } while (1);
  304. }
  305. struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
  306. {
  307. struct pblk_line_meta *lm = &pblk->lm;
  308. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  309. struct list_head *move_list = NULL;
  310. int vsc = le32_to_cpu(*line->vsc);
  311. lockdep_assert_held(&line->lock);
  312. if (line->w_err_gc->has_write_err) {
  313. if (line->gc_group != PBLK_LINEGC_WERR) {
  314. line->gc_group = PBLK_LINEGC_WERR;
  315. move_list = &l_mg->gc_werr_list;
  316. pblk_rl_werr_line_in(&pblk->rl);
  317. }
  318. } else if (!vsc) {
  319. if (line->gc_group != PBLK_LINEGC_FULL) {
  320. line->gc_group = PBLK_LINEGC_FULL;
  321. move_list = &l_mg->gc_full_list;
  322. }
  323. } else if (vsc < lm->high_thrs) {
  324. if (line->gc_group != PBLK_LINEGC_HIGH) {
  325. line->gc_group = PBLK_LINEGC_HIGH;
  326. move_list = &l_mg->gc_high_list;
  327. }
  328. } else if (vsc < lm->mid_thrs) {
  329. if (line->gc_group != PBLK_LINEGC_MID) {
  330. line->gc_group = PBLK_LINEGC_MID;
  331. move_list = &l_mg->gc_mid_list;
  332. }
  333. } else if (vsc < line->sec_in_line) {
  334. if (line->gc_group != PBLK_LINEGC_LOW) {
  335. line->gc_group = PBLK_LINEGC_LOW;
  336. move_list = &l_mg->gc_low_list;
  337. }
  338. } else if (vsc == line->sec_in_line) {
  339. if (line->gc_group != PBLK_LINEGC_EMPTY) {
  340. line->gc_group = PBLK_LINEGC_EMPTY;
  341. move_list = &l_mg->gc_empty_list;
  342. }
  343. } else {
  344. line->state = PBLK_LINESTATE_CORRUPT;
  345. line->gc_group = PBLK_LINEGC_NONE;
  346. move_list = &l_mg->corrupt_list;
  347. pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
  348. line->id, vsc,
  349. line->sec_in_line,
  350. lm->high_thrs, lm->mid_thrs);
  351. }
  352. return move_list;
  353. }
  354. void pblk_discard(struct pblk *pblk, struct bio *bio)
  355. {
  356. sector_t slba = pblk_get_lba(bio);
  357. sector_t nr_secs = pblk_get_secs(bio);
  358. pblk_invalidate_range(pblk, slba, nr_secs);
  359. }
  360. void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
  361. {
  362. atomic_long_inc(&pblk->write_failed);
  363. #ifdef CONFIG_NVM_PBLK_DEBUG
  364. pblk_print_failed_rqd(pblk, rqd, rqd->error);
  365. #endif
  366. }
  367. void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
  368. {
  369. /* Empty page read is not necessarily an error (e.g., L2P recovery) */
  370. if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
  371. atomic_long_inc(&pblk->read_empty);
  372. return;
  373. }
  374. switch (rqd->error) {
  375. case NVM_RSP_WARN_HIGHECC:
  376. atomic_long_inc(&pblk->read_high_ecc);
  377. break;
  378. case NVM_RSP_ERR_FAILECC:
  379. case NVM_RSP_ERR_FAILCRC:
  380. atomic_long_inc(&pblk->read_failed);
  381. break;
  382. default:
  383. pblk_err(pblk, "unknown read error:%d\n", rqd->error);
  384. }
  385. #ifdef CONFIG_NVM_PBLK_DEBUG
  386. pblk_print_failed_rqd(pblk, rqd, rqd->error);
  387. #endif
  388. }
  389. void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
  390. {
  391. pblk->sec_per_write = sec_per_write;
  392. }
  393. int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
  394. {
  395. struct nvm_tgt_dev *dev = pblk->dev;
  396. atomic_inc(&pblk->inflight_io);
  397. #ifdef CONFIG_NVM_PBLK_DEBUG
  398. if (pblk_check_io(pblk, rqd))
  399. return NVM_IO_ERR;
  400. #endif
  401. return nvm_submit_io(dev, rqd);
  402. }
  403. int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
  404. {
  405. struct nvm_tgt_dev *dev = pblk->dev;
  406. atomic_inc(&pblk->inflight_io);
  407. #ifdef CONFIG_NVM_PBLK_DEBUG
  408. if (pblk_check_io(pblk, rqd))
  409. return NVM_IO_ERR;
  410. #endif
  411. return nvm_submit_io_sync(dev, rqd);
  412. }
  413. static void pblk_bio_map_addr_endio(struct bio *bio)
  414. {
  415. bio_put(bio);
  416. }
  417. struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
  418. unsigned int nr_secs, unsigned int len,
  419. int alloc_type, gfp_t gfp_mask)
  420. {
  421. struct nvm_tgt_dev *dev = pblk->dev;
  422. void *kaddr = data;
  423. struct page *page;
  424. struct bio *bio;
  425. int i, ret;
  426. if (alloc_type == PBLK_KMALLOC_META)
  427. return bio_map_kern(dev->q, kaddr, len, gfp_mask);
  428. bio = bio_kmalloc(gfp_mask, nr_secs);
  429. if (!bio)
  430. return ERR_PTR(-ENOMEM);
  431. for (i = 0; i < nr_secs; i++) {
  432. page = vmalloc_to_page(kaddr);
  433. if (!page) {
  434. pblk_err(pblk, "could not map vmalloc bio\n");
  435. bio_put(bio);
  436. bio = ERR_PTR(-ENOMEM);
  437. goto out;
  438. }
  439. ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
  440. if (ret != PAGE_SIZE) {
  441. pblk_err(pblk, "could not add page to bio\n");
  442. bio_put(bio);
  443. bio = ERR_PTR(-ENOMEM);
  444. goto out;
  445. }
  446. kaddr += PAGE_SIZE;
  447. }
  448. bio->bi_end_io = pblk_bio_map_addr_endio;
  449. out:
  450. return bio;
  451. }
  452. int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
  453. unsigned long secs_to_flush)
  454. {
  455. int max = pblk->sec_per_write;
  456. int min = pblk->min_write_pgs;
  457. int secs_to_sync = 0;
  458. if (secs_avail >= max)
  459. secs_to_sync = max;
  460. else if (secs_avail >= min)
  461. secs_to_sync = min * (secs_avail / min);
  462. else if (secs_to_flush)
  463. secs_to_sync = min;
  464. return secs_to_sync;
  465. }
  466. void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  467. {
  468. u64 addr;
  469. int i;
  470. spin_lock(&line->lock);
  471. addr = find_next_zero_bit(line->map_bitmap,
  472. pblk->lm.sec_per_line, line->cur_sec);
  473. line->cur_sec = addr - nr_secs;
  474. for (i = 0; i < nr_secs; i++, line->cur_sec--)
  475. WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
  476. spin_unlock(&line->lock);
  477. }
  478. u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  479. {
  480. u64 addr;
  481. int i;
  482. lockdep_assert_held(&line->lock);
  483. /* logic error: ppa out-of-bounds. Prevent generating bad address */
  484. if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
  485. WARN(1, "pblk: page allocation out of bounds\n");
  486. nr_secs = pblk->lm.sec_per_line - line->cur_sec;
  487. }
  488. line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
  489. pblk->lm.sec_per_line, line->cur_sec);
  490. for (i = 0; i < nr_secs; i++, line->cur_sec++)
  491. WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
  492. return addr;
  493. }
  494. u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
  495. {
  496. u64 addr;
  497. /* Lock needed in case a write fails and a recovery needs to remap
  498. * failed write buffer entries
  499. */
  500. spin_lock(&line->lock);
  501. addr = __pblk_alloc_page(pblk, line, nr_secs);
  502. line->left_msecs -= nr_secs;
  503. WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
  504. spin_unlock(&line->lock);
  505. return addr;
  506. }
  507. u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
  508. {
  509. u64 paddr;
  510. spin_lock(&line->lock);
  511. paddr = find_next_zero_bit(line->map_bitmap,
  512. pblk->lm.sec_per_line, line->cur_sec);
  513. spin_unlock(&line->lock);
  514. return paddr;
  515. }
  516. /*
  517. * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
  518. * taking the per LUN semaphore.
  519. */
  520. static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
  521. void *emeta_buf, u64 paddr, int dir)
  522. {
  523. struct nvm_tgt_dev *dev = pblk->dev;
  524. struct nvm_geo *geo = &dev->geo;
  525. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  526. struct pblk_line_meta *lm = &pblk->lm;
  527. void *ppa_list, *meta_list;
  528. struct bio *bio;
  529. struct nvm_rq rqd;
  530. dma_addr_t dma_ppa_list, dma_meta_list;
  531. int min = pblk->min_write_pgs;
  532. int left_ppas = lm->emeta_sec[0];
  533. int id = line->id;
  534. int rq_ppas, rq_len;
  535. int cmd_op, bio_op;
  536. int i, j;
  537. int ret;
  538. if (dir == PBLK_WRITE) {
  539. bio_op = REQ_OP_WRITE;
  540. cmd_op = NVM_OP_PWRITE;
  541. } else if (dir == PBLK_READ) {
  542. bio_op = REQ_OP_READ;
  543. cmd_op = NVM_OP_PREAD;
  544. } else
  545. return -EINVAL;
  546. meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
  547. &dma_meta_list);
  548. if (!meta_list)
  549. return -ENOMEM;
  550. ppa_list = meta_list + pblk_dma_meta_size;
  551. dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
  552. next_rq:
  553. memset(&rqd, 0, sizeof(struct nvm_rq));
  554. rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
  555. rq_len = rq_ppas * geo->csecs;
  556. bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
  557. l_mg->emeta_alloc_type, GFP_KERNEL);
  558. if (IS_ERR(bio)) {
  559. ret = PTR_ERR(bio);
  560. goto free_rqd_dma;
  561. }
  562. bio->bi_iter.bi_sector = 0; /* internal bio */
  563. bio_set_op_attrs(bio, bio_op, 0);
  564. rqd.bio = bio;
  565. rqd.meta_list = meta_list;
  566. rqd.ppa_list = ppa_list;
  567. rqd.dma_meta_list = dma_meta_list;
  568. rqd.dma_ppa_list = dma_ppa_list;
  569. rqd.opcode = cmd_op;
  570. rqd.nr_ppas = rq_ppas;
  571. if (dir == PBLK_WRITE) {
  572. struct pblk_sec_meta *meta_list = rqd.meta_list;
  573. rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
  574. for (i = 0; i < rqd.nr_ppas; ) {
  575. spin_lock(&line->lock);
  576. paddr = __pblk_alloc_page(pblk, line, min);
  577. spin_unlock(&line->lock);
  578. for (j = 0; j < min; j++, i++, paddr++) {
  579. meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
  580. rqd.ppa_list[i] =
  581. addr_to_gen_ppa(pblk, paddr, id);
  582. }
  583. }
  584. } else {
  585. for (i = 0; i < rqd.nr_ppas; ) {
  586. struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
  587. int pos = pblk_ppa_to_pos(geo, ppa);
  588. int read_type = PBLK_READ_RANDOM;
  589. if (pblk_io_aligned(pblk, rq_ppas))
  590. read_type = PBLK_READ_SEQUENTIAL;
  591. rqd.flags = pblk_set_read_mode(pblk, read_type);
  592. while (test_bit(pos, line->blk_bitmap)) {
  593. paddr += min;
  594. if (pblk_boundary_paddr_checks(pblk, paddr)) {
  595. pblk_err(pblk, "corrupt emeta line:%d\n",
  596. line->id);
  597. bio_put(bio);
  598. ret = -EINTR;
  599. goto free_rqd_dma;
  600. }
  601. ppa = addr_to_gen_ppa(pblk, paddr, id);
  602. pos = pblk_ppa_to_pos(geo, ppa);
  603. }
  604. if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
  605. pblk_err(pblk, "corrupt emeta line:%d\n",
  606. line->id);
  607. bio_put(bio);
  608. ret = -EINTR;
  609. goto free_rqd_dma;
  610. }
  611. for (j = 0; j < min; j++, i++, paddr++)
  612. rqd.ppa_list[i] =
  613. addr_to_gen_ppa(pblk, paddr, line->id);
  614. }
  615. }
  616. ret = pblk_submit_io_sync(pblk, &rqd);
  617. if (ret) {
  618. pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
  619. bio_put(bio);
  620. goto free_rqd_dma;
  621. }
  622. atomic_dec(&pblk->inflight_io);
  623. if (rqd.error) {
  624. if (dir == PBLK_WRITE)
  625. pblk_log_write_err(pblk, &rqd);
  626. else
  627. pblk_log_read_err(pblk, &rqd);
  628. }
  629. emeta_buf += rq_len;
  630. left_ppas -= rq_ppas;
  631. if (left_ppas)
  632. goto next_rq;
  633. free_rqd_dma:
  634. nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
  635. return ret;
  636. }
  637. u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
  638. {
  639. struct nvm_tgt_dev *dev = pblk->dev;
  640. struct nvm_geo *geo = &dev->geo;
  641. struct pblk_line_meta *lm = &pblk->lm;
  642. int bit;
  643. /* This usually only happens on bad lines */
  644. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  645. if (bit >= lm->blk_per_line)
  646. return -1;
  647. return bit * geo->ws_opt;
  648. }
  649. static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
  650. u64 paddr, int dir)
  651. {
  652. struct nvm_tgt_dev *dev = pblk->dev;
  653. struct pblk_line_meta *lm = &pblk->lm;
  654. struct bio *bio;
  655. struct nvm_rq rqd;
  656. __le64 *lba_list = NULL;
  657. int i, ret;
  658. int cmd_op, bio_op;
  659. int flags;
  660. if (dir == PBLK_WRITE) {
  661. bio_op = REQ_OP_WRITE;
  662. cmd_op = NVM_OP_PWRITE;
  663. flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
  664. lba_list = emeta_to_lbas(pblk, line->emeta->buf);
  665. } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
  666. bio_op = REQ_OP_READ;
  667. cmd_op = NVM_OP_PREAD;
  668. flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
  669. } else
  670. return -EINVAL;
  671. memset(&rqd, 0, sizeof(struct nvm_rq));
  672. rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
  673. &rqd.dma_meta_list);
  674. if (!rqd.meta_list)
  675. return -ENOMEM;
  676. rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
  677. rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
  678. bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
  679. if (IS_ERR(bio)) {
  680. ret = PTR_ERR(bio);
  681. goto free_ppa_list;
  682. }
  683. bio->bi_iter.bi_sector = 0; /* internal bio */
  684. bio_set_op_attrs(bio, bio_op, 0);
  685. rqd.bio = bio;
  686. rqd.opcode = cmd_op;
  687. rqd.flags = flags;
  688. rqd.nr_ppas = lm->smeta_sec;
  689. for (i = 0; i < lm->smeta_sec; i++, paddr++) {
  690. struct pblk_sec_meta *meta_list = rqd.meta_list;
  691. rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
  692. if (dir == PBLK_WRITE) {
  693. __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
  694. meta_list[i].lba = lba_list[paddr] = addr_empty;
  695. }
  696. }
  697. /*
  698. * This I/O is sent by the write thread when a line is replace. Since
  699. * the write thread is the only one sending write and erase commands,
  700. * there is no need to take the LUN semaphore.
  701. */
  702. ret = pblk_submit_io_sync(pblk, &rqd);
  703. if (ret) {
  704. pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
  705. bio_put(bio);
  706. goto free_ppa_list;
  707. }
  708. atomic_dec(&pblk->inflight_io);
  709. if (rqd.error) {
  710. if (dir == PBLK_WRITE) {
  711. pblk_log_write_err(pblk, &rqd);
  712. ret = 1;
  713. } else if (dir == PBLK_READ)
  714. pblk_log_read_err(pblk, &rqd);
  715. }
  716. free_ppa_list:
  717. nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
  718. return ret;
  719. }
  720. int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
  721. {
  722. u64 bpaddr = pblk_line_smeta_start(pblk, line);
  723. return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
  724. }
  725. int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
  726. void *emeta_buf)
  727. {
  728. return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
  729. line->emeta_ssec, PBLK_READ);
  730. }
  731. static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
  732. struct ppa_addr ppa)
  733. {
  734. rqd->opcode = NVM_OP_ERASE;
  735. rqd->ppa_addr = ppa;
  736. rqd->nr_ppas = 1;
  737. rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
  738. rqd->bio = NULL;
  739. }
  740. static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
  741. {
  742. struct nvm_rq rqd;
  743. int ret = 0;
  744. memset(&rqd, 0, sizeof(struct nvm_rq));
  745. pblk_setup_e_rq(pblk, &rqd, ppa);
  746. /* The write thread schedules erases so that it minimizes disturbances
  747. * with writes. Thus, there is no need to take the LUN semaphore.
  748. */
  749. ret = pblk_submit_io_sync(pblk, &rqd);
  750. if (ret) {
  751. struct nvm_tgt_dev *dev = pblk->dev;
  752. struct nvm_geo *geo = &dev->geo;
  753. pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
  754. pblk_ppa_to_line(ppa),
  755. pblk_ppa_to_pos(geo, ppa));
  756. rqd.error = ret;
  757. goto out;
  758. }
  759. out:
  760. rqd.private = pblk;
  761. __pblk_end_io_erase(pblk, &rqd);
  762. return ret;
  763. }
  764. int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
  765. {
  766. struct pblk_line_meta *lm = &pblk->lm;
  767. struct ppa_addr ppa;
  768. int ret, bit = -1;
  769. /* Erase only good blocks, one at a time */
  770. do {
  771. spin_lock(&line->lock);
  772. bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
  773. bit + 1);
  774. if (bit >= lm->blk_per_line) {
  775. spin_unlock(&line->lock);
  776. break;
  777. }
  778. ppa = pblk->luns[bit].bppa; /* set ch and lun */
  779. ppa.a.blk = line->id;
  780. atomic_dec(&line->left_eblks);
  781. WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
  782. spin_unlock(&line->lock);
  783. ret = pblk_blk_erase_sync(pblk, ppa);
  784. if (ret) {
  785. pblk_err(pblk, "failed to erase line %d\n", line->id);
  786. return ret;
  787. }
  788. } while (1);
  789. return 0;
  790. }
  791. static void pblk_line_setup_metadata(struct pblk_line *line,
  792. struct pblk_line_mgmt *l_mg,
  793. struct pblk_line_meta *lm)
  794. {
  795. int meta_line;
  796. lockdep_assert_held(&l_mg->free_lock);
  797. retry_meta:
  798. meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
  799. if (meta_line == PBLK_DATA_LINES) {
  800. spin_unlock(&l_mg->free_lock);
  801. io_schedule();
  802. spin_lock(&l_mg->free_lock);
  803. goto retry_meta;
  804. }
  805. set_bit(meta_line, &l_mg->meta_bitmap);
  806. line->meta_line = meta_line;
  807. line->smeta = l_mg->sline_meta[meta_line];
  808. line->emeta = l_mg->eline_meta[meta_line];
  809. memset(line->smeta, 0, lm->smeta_len);
  810. memset(line->emeta->buf, 0, lm->emeta_len[0]);
  811. line->emeta->mem = 0;
  812. atomic_set(&line->emeta->sync, 0);
  813. }
  814. /* For now lines are always assumed full lines. Thus, smeta former and current
  815. * lun bitmaps are omitted.
  816. */
  817. static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
  818. struct pblk_line *cur)
  819. {
  820. struct nvm_tgt_dev *dev = pblk->dev;
  821. struct nvm_geo *geo = &dev->geo;
  822. struct pblk_line_meta *lm = &pblk->lm;
  823. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  824. struct pblk_emeta *emeta = line->emeta;
  825. struct line_emeta *emeta_buf = emeta->buf;
  826. struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
  827. int nr_blk_line;
  828. /* After erasing the line, new bad blocks might appear and we risk
  829. * having an invalid line
  830. */
  831. nr_blk_line = lm->blk_per_line -
  832. bitmap_weight(line->blk_bitmap, lm->blk_per_line);
  833. if (nr_blk_line < lm->min_blk_line) {
  834. spin_lock(&l_mg->free_lock);
  835. spin_lock(&line->lock);
  836. line->state = PBLK_LINESTATE_BAD;
  837. spin_unlock(&line->lock);
  838. list_add_tail(&line->list, &l_mg->bad_list);
  839. spin_unlock(&l_mg->free_lock);
  840. pblk_debug(pblk, "line %d is bad\n", line->id);
  841. return 0;
  842. }
  843. /* Run-time metadata */
  844. line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
  845. /* Mark LUNs allocated in this line (all for now) */
  846. bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
  847. smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
  848. memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
  849. smeta_buf->header.id = cpu_to_le32(line->id);
  850. smeta_buf->header.type = cpu_to_le16(line->type);
  851. smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
  852. smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
  853. /* Start metadata */
  854. smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
  855. smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
  856. /* Fill metadata among lines */
  857. if (cur) {
  858. memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
  859. smeta_buf->prev_id = cpu_to_le32(cur->id);
  860. cur->emeta->buf->next_id = cpu_to_le32(line->id);
  861. } else {
  862. smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
  863. }
  864. /* All smeta must be set at this point */
  865. smeta_buf->header.crc = cpu_to_le32(
  866. pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
  867. smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
  868. /* End metadata */
  869. memcpy(&emeta_buf->header, &smeta_buf->header,
  870. sizeof(struct line_header));
  871. emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
  872. emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
  873. emeta_buf->header.crc = cpu_to_le32(
  874. pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
  875. emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
  876. emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
  877. emeta_buf->nr_valid_lbas = cpu_to_le64(0);
  878. emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
  879. emeta_buf->crc = cpu_to_le32(0);
  880. emeta_buf->prev_id = smeta_buf->prev_id;
  881. return 1;
  882. }
  883. static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
  884. {
  885. struct pblk_line_meta *lm = &pblk->lm;
  886. line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
  887. if (!line->map_bitmap)
  888. return -ENOMEM;
  889. /* will be initialized using bb info from map_bitmap */
  890. line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
  891. if (!line->invalid_bitmap) {
  892. kfree(line->map_bitmap);
  893. line->map_bitmap = NULL;
  894. return -ENOMEM;
  895. }
  896. return 0;
  897. }
  898. /* For now lines are always assumed full lines. Thus, smeta former and current
  899. * lun bitmaps are omitted.
  900. */
  901. static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
  902. int init)
  903. {
  904. struct nvm_tgt_dev *dev = pblk->dev;
  905. struct nvm_geo *geo = &dev->geo;
  906. struct pblk_line_meta *lm = &pblk->lm;
  907. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  908. u64 off;
  909. int bit = -1;
  910. int emeta_secs;
  911. line->sec_in_line = lm->sec_per_line;
  912. /* Capture bad block information on line mapping bitmaps */
  913. while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
  914. bit + 1)) < lm->blk_per_line) {
  915. off = bit * geo->ws_opt;
  916. bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
  917. lm->sec_per_line);
  918. bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
  919. lm->sec_per_line);
  920. line->sec_in_line -= geo->clba;
  921. }
  922. /* Mark smeta metadata sectors as bad sectors */
  923. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  924. off = bit * geo->ws_opt;
  925. bitmap_set(line->map_bitmap, off, lm->smeta_sec);
  926. line->sec_in_line -= lm->smeta_sec;
  927. line->smeta_ssec = off;
  928. line->cur_sec = off + lm->smeta_sec;
  929. if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
  930. pblk_debug(pblk, "line smeta I/O failed. Retry\n");
  931. return 0;
  932. }
  933. bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
  934. /* Mark emeta metadata sectors as bad sectors. We need to consider bad
  935. * blocks to make sure that there are enough sectors to store emeta
  936. */
  937. emeta_secs = lm->emeta_sec[0];
  938. off = lm->sec_per_line;
  939. while (emeta_secs) {
  940. off -= geo->ws_opt;
  941. if (!test_bit(off, line->invalid_bitmap)) {
  942. bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
  943. emeta_secs -= geo->ws_opt;
  944. }
  945. }
  946. line->emeta_ssec = off;
  947. line->sec_in_line -= lm->emeta_sec[0];
  948. line->nr_valid_lbas = 0;
  949. line->left_msecs = line->sec_in_line;
  950. *line->vsc = cpu_to_le32(line->sec_in_line);
  951. if (lm->sec_per_line - line->sec_in_line !=
  952. bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
  953. spin_lock(&line->lock);
  954. line->state = PBLK_LINESTATE_BAD;
  955. spin_unlock(&line->lock);
  956. list_add_tail(&line->list, &l_mg->bad_list);
  957. pblk_err(pblk, "unexpected line %d is bad\n", line->id);
  958. return 0;
  959. }
  960. return 1;
  961. }
  962. static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
  963. {
  964. struct pblk_line_meta *lm = &pblk->lm;
  965. struct nvm_tgt_dev *dev = pblk->dev;
  966. struct nvm_geo *geo = &dev->geo;
  967. int blk_to_erase = atomic_read(&line->blk_in_line);
  968. int i;
  969. for (i = 0; i < lm->blk_per_line; i++) {
  970. struct pblk_lun *rlun = &pblk->luns[i];
  971. int pos = pblk_ppa_to_pos(geo, rlun->bppa);
  972. int state = line->chks[pos].state;
  973. /* Free chunks should not be erased */
  974. if (state & NVM_CHK_ST_FREE) {
  975. set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
  976. line->erase_bitmap);
  977. blk_to_erase--;
  978. }
  979. }
  980. return blk_to_erase;
  981. }
  982. static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
  983. {
  984. struct pblk_line_meta *lm = &pblk->lm;
  985. int blk_in_line = atomic_read(&line->blk_in_line);
  986. int blk_to_erase;
  987. /* Bad blocks do not need to be erased */
  988. bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
  989. spin_lock(&line->lock);
  990. /* If we have not written to this line, we need to mark up free chunks
  991. * as already erased
  992. */
  993. if (line->state == PBLK_LINESTATE_NEW) {
  994. blk_to_erase = pblk_prepare_new_line(pblk, line);
  995. line->state = PBLK_LINESTATE_FREE;
  996. } else {
  997. blk_to_erase = blk_in_line;
  998. }
  999. if (blk_in_line < lm->min_blk_line) {
  1000. spin_unlock(&line->lock);
  1001. return -EAGAIN;
  1002. }
  1003. if (line->state != PBLK_LINESTATE_FREE) {
  1004. WARN(1, "pblk: corrupted line %d, state %d\n",
  1005. line->id, line->state);
  1006. spin_unlock(&line->lock);
  1007. return -EINTR;
  1008. }
  1009. line->state = PBLK_LINESTATE_OPEN;
  1010. atomic_set(&line->left_eblks, blk_to_erase);
  1011. atomic_set(&line->left_seblks, blk_to_erase);
  1012. line->meta_distance = lm->meta_distance;
  1013. spin_unlock(&line->lock);
  1014. kref_init(&line->ref);
  1015. return 0;
  1016. }
  1017. int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
  1018. {
  1019. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1020. int ret;
  1021. spin_lock(&l_mg->free_lock);
  1022. l_mg->data_line = line;
  1023. list_del(&line->list);
  1024. ret = pblk_line_prepare(pblk, line);
  1025. if (ret) {
  1026. list_add(&line->list, &l_mg->free_list);
  1027. spin_unlock(&l_mg->free_lock);
  1028. return ret;
  1029. }
  1030. spin_unlock(&l_mg->free_lock);
  1031. ret = pblk_line_alloc_bitmaps(pblk, line);
  1032. if (ret)
  1033. return ret;
  1034. if (!pblk_line_init_bb(pblk, line, 0)) {
  1035. list_add(&line->list, &l_mg->free_list);
  1036. return -EINTR;
  1037. }
  1038. pblk_rl_free_lines_dec(&pblk->rl, line, true);
  1039. return 0;
  1040. }
  1041. void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
  1042. {
  1043. kfree(line->map_bitmap);
  1044. line->map_bitmap = NULL;
  1045. line->smeta = NULL;
  1046. line->emeta = NULL;
  1047. }
  1048. static void pblk_line_reinit(struct pblk_line *line)
  1049. {
  1050. *line->vsc = cpu_to_le32(EMPTY_ENTRY);
  1051. line->map_bitmap = NULL;
  1052. line->invalid_bitmap = NULL;
  1053. line->smeta = NULL;
  1054. line->emeta = NULL;
  1055. }
  1056. void pblk_line_free(struct pblk_line *line)
  1057. {
  1058. kfree(line->map_bitmap);
  1059. kfree(line->invalid_bitmap);
  1060. pblk_line_reinit(line);
  1061. }
  1062. struct pblk_line *pblk_line_get(struct pblk *pblk)
  1063. {
  1064. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1065. struct pblk_line_meta *lm = &pblk->lm;
  1066. struct pblk_line *line;
  1067. int ret, bit;
  1068. lockdep_assert_held(&l_mg->free_lock);
  1069. retry:
  1070. if (list_empty(&l_mg->free_list)) {
  1071. pblk_err(pblk, "no free lines\n");
  1072. return NULL;
  1073. }
  1074. line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
  1075. list_del(&line->list);
  1076. l_mg->nr_free_lines--;
  1077. bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
  1078. if (unlikely(bit >= lm->blk_per_line)) {
  1079. spin_lock(&line->lock);
  1080. line->state = PBLK_LINESTATE_BAD;
  1081. spin_unlock(&line->lock);
  1082. list_add_tail(&line->list, &l_mg->bad_list);
  1083. pblk_debug(pblk, "line %d is bad\n", line->id);
  1084. goto retry;
  1085. }
  1086. ret = pblk_line_prepare(pblk, line);
  1087. if (ret) {
  1088. switch (ret) {
  1089. case -EAGAIN:
  1090. list_add(&line->list, &l_mg->bad_list);
  1091. goto retry;
  1092. case -EINTR:
  1093. list_add(&line->list, &l_mg->corrupt_list);
  1094. goto retry;
  1095. default:
  1096. pblk_err(pblk, "failed to prepare line %d\n", line->id);
  1097. list_add(&line->list, &l_mg->free_list);
  1098. l_mg->nr_free_lines++;
  1099. return NULL;
  1100. }
  1101. }
  1102. return line;
  1103. }
  1104. static struct pblk_line *pblk_line_retry(struct pblk *pblk,
  1105. struct pblk_line *line)
  1106. {
  1107. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1108. struct pblk_line *retry_line;
  1109. retry:
  1110. spin_lock(&l_mg->free_lock);
  1111. retry_line = pblk_line_get(pblk);
  1112. if (!retry_line) {
  1113. l_mg->data_line = NULL;
  1114. spin_unlock(&l_mg->free_lock);
  1115. return NULL;
  1116. }
  1117. retry_line->map_bitmap = line->map_bitmap;
  1118. retry_line->invalid_bitmap = line->invalid_bitmap;
  1119. retry_line->smeta = line->smeta;
  1120. retry_line->emeta = line->emeta;
  1121. retry_line->meta_line = line->meta_line;
  1122. pblk_line_reinit(line);
  1123. l_mg->data_line = retry_line;
  1124. spin_unlock(&l_mg->free_lock);
  1125. pblk_rl_free_lines_dec(&pblk->rl, line, false);
  1126. if (pblk_line_erase(pblk, retry_line))
  1127. goto retry;
  1128. return retry_line;
  1129. }
  1130. static void pblk_set_space_limit(struct pblk *pblk)
  1131. {
  1132. struct pblk_rl *rl = &pblk->rl;
  1133. atomic_set(&rl->rb_space, 0);
  1134. }
  1135. struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
  1136. {
  1137. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1138. struct pblk_line *line;
  1139. spin_lock(&l_mg->free_lock);
  1140. line = pblk_line_get(pblk);
  1141. if (!line) {
  1142. spin_unlock(&l_mg->free_lock);
  1143. return NULL;
  1144. }
  1145. line->seq_nr = l_mg->d_seq_nr++;
  1146. line->type = PBLK_LINETYPE_DATA;
  1147. l_mg->data_line = line;
  1148. pblk_line_setup_metadata(line, l_mg, &pblk->lm);
  1149. /* Allocate next line for preparation */
  1150. l_mg->data_next = pblk_line_get(pblk);
  1151. if (!l_mg->data_next) {
  1152. /* If we cannot get a new line, we need to stop the pipeline.
  1153. * Only allow as many writes in as we can store safely and then
  1154. * fail gracefully
  1155. */
  1156. pblk_set_space_limit(pblk);
  1157. l_mg->data_next = NULL;
  1158. } else {
  1159. l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
  1160. l_mg->data_next->type = PBLK_LINETYPE_DATA;
  1161. }
  1162. spin_unlock(&l_mg->free_lock);
  1163. if (pblk_line_alloc_bitmaps(pblk, line))
  1164. return NULL;
  1165. if (pblk_line_erase(pblk, line)) {
  1166. line = pblk_line_retry(pblk, line);
  1167. if (!line)
  1168. return NULL;
  1169. }
  1170. retry_setup:
  1171. if (!pblk_line_init_metadata(pblk, line, NULL)) {
  1172. line = pblk_line_retry(pblk, line);
  1173. if (!line)
  1174. return NULL;
  1175. goto retry_setup;
  1176. }
  1177. if (!pblk_line_init_bb(pblk, line, 1)) {
  1178. line = pblk_line_retry(pblk, line);
  1179. if (!line)
  1180. return NULL;
  1181. goto retry_setup;
  1182. }
  1183. pblk_rl_free_lines_dec(&pblk->rl, line, true);
  1184. return line;
  1185. }
  1186. static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
  1187. {
  1188. lockdep_assert_held(&pblk->l_mg.free_lock);
  1189. pblk_set_space_limit(pblk);
  1190. pblk->state = PBLK_STATE_STOPPING;
  1191. }
  1192. static void pblk_line_close_meta_sync(struct pblk *pblk)
  1193. {
  1194. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1195. struct pblk_line_meta *lm = &pblk->lm;
  1196. struct pblk_line *line, *tline;
  1197. LIST_HEAD(list);
  1198. spin_lock(&l_mg->close_lock);
  1199. if (list_empty(&l_mg->emeta_list)) {
  1200. spin_unlock(&l_mg->close_lock);
  1201. return;
  1202. }
  1203. list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
  1204. spin_unlock(&l_mg->close_lock);
  1205. list_for_each_entry_safe(line, tline, &list, list) {
  1206. struct pblk_emeta *emeta = line->emeta;
  1207. while (emeta->mem < lm->emeta_len[0]) {
  1208. int ret;
  1209. ret = pblk_submit_meta_io(pblk, line);
  1210. if (ret) {
  1211. pblk_err(pblk, "sync meta line %d failed (%d)\n",
  1212. line->id, ret);
  1213. return;
  1214. }
  1215. }
  1216. }
  1217. pblk_wait_for_meta(pblk);
  1218. flush_workqueue(pblk->close_wq);
  1219. }
  1220. void __pblk_pipeline_flush(struct pblk *pblk)
  1221. {
  1222. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1223. int ret;
  1224. spin_lock(&l_mg->free_lock);
  1225. if (pblk->state == PBLK_STATE_RECOVERING ||
  1226. pblk->state == PBLK_STATE_STOPPED) {
  1227. spin_unlock(&l_mg->free_lock);
  1228. return;
  1229. }
  1230. pblk->state = PBLK_STATE_RECOVERING;
  1231. spin_unlock(&l_mg->free_lock);
  1232. pblk_flush_writer(pblk);
  1233. pblk_wait_for_meta(pblk);
  1234. ret = pblk_recov_pad(pblk);
  1235. if (ret) {
  1236. pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
  1237. return;
  1238. }
  1239. flush_workqueue(pblk->bb_wq);
  1240. pblk_line_close_meta_sync(pblk);
  1241. }
  1242. void __pblk_pipeline_stop(struct pblk *pblk)
  1243. {
  1244. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1245. spin_lock(&l_mg->free_lock);
  1246. pblk->state = PBLK_STATE_STOPPED;
  1247. l_mg->data_line = NULL;
  1248. l_mg->data_next = NULL;
  1249. spin_unlock(&l_mg->free_lock);
  1250. }
  1251. void pblk_pipeline_stop(struct pblk *pblk)
  1252. {
  1253. __pblk_pipeline_flush(pblk);
  1254. __pblk_pipeline_stop(pblk);
  1255. }
  1256. struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
  1257. {
  1258. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1259. struct pblk_line *cur, *new = NULL;
  1260. unsigned int left_seblks;
  1261. cur = l_mg->data_line;
  1262. new = l_mg->data_next;
  1263. if (!new)
  1264. goto out;
  1265. l_mg->data_line = new;
  1266. spin_lock(&l_mg->free_lock);
  1267. pblk_line_setup_metadata(new, l_mg, &pblk->lm);
  1268. spin_unlock(&l_mg->free_lock);
  1269. retry_erase:
  1270. left_seblks = atomic_read(&new->left_seblks);
  1271. if (left_seblks) {
  1272. /* If line is not fully erased, erase it */
  1273. if (atomic_read(&new->left_eblks)) {
  1274. if (pblk_line_erase(pblk, new))
  1275. goto out;
  1276. } else {
  1277. io_schedule();
  1278. }
  1279. goto retry_erase;
  1280. }
  1281. if (pblk_line_alloc_bitmaps(pblk, new))
  1282. return NULL;
  1283. retry_setup:
  1284. if (!pblk_line_init_metadata(pblk, new, cur)) {
  1285. new = pblk_line_retry(pblk, new);
  1286. if (!new)
  1287. goto out;
  1288. goto retry_setup;
  1289. }
  1290. if (!pblk_line_init_bb(pblk, new, 1)) {
  1291. new = pblk_line_retry(pblk, new);
  1292. if (!new)
  1293. goto out;
  1294. goto retry_setup;
  1295. }
  1296. pblk_rl_free_lines_dec(&pblk->rl, new, true);
  1297. /* Allocate next line for preparation */
  1298. spin_lock(&l_mg->free_lock);
  1299. l_mg->data_next = pblk_line_get(pblk);
  1300. if (!l_mg->data_next) {
  1301. /* If we cannot get a new line, we need to stop the pipeline.
  1302. * Only allow as many writes in as we can store safely and then
  1303. * fail gracefully
  1304. */
  1305. pblk_stop_writes(pblk, new);
  1306. l_mg->data_next = NULL;
  1307. } else {
  1308. l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
  1309. l_mg->data_next->type = PBLK_LINETYPE_DATA;
  1310. }
  1311. spin_unlock(&l_mg->free_lock);
  1312. out:
  1313. return new;
  1314. }
  1315. static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
  1316. {
  1317. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  1318. struct pblk_gc *gc = &pblk->gc;
  1319. spin_lock(&line->lock);
  1320. WARN_ON(line->state != PBLK_LINESTATE_GC);
  1321. line->state = PBLK_LINESTATE_FREE;
  1322. line->gc_group = PBLK_LINEGC_NONE;
  1323. pblk_line_free(line);
  1324. if (line->w_err_gc->has_write_err) {
  1325. pblk_rl_werr_line_out(&pblk->rl);
  1326. line->w_err_gc->has_write_err = 0;
  1327. }
  1328. spin_unlock(&line->lock);
  1329. atomic_dec(&gc->pipeline_gc);
  1330. spin_lock(&l_mg->free_lock);
  1331. list_add_tail(&line->list, &l_mg->free_list);
  1332. l_mg->nr_free_lines++;
  1333. spin_unlock(&l_mg->free_lock);
  1334. pblk_rl_free_lines_inc(&pblk->rl, line);
  1335. }
  1336. static void pblk_line_put_ws(struct work_struct *work)
  1337. {
  1338. struct pblk_line_ws *line_put_ws = container_of(work,
  1339. struct pblk_line_ws, ws);
  1340. struct pblk *pblk = line_put_ws->pblk;
  1341. struct pblk_line *line = line_put_ws->line;
  1342. __pblk_line_put(pblk, line);
  1343. mempool_free(line_put_ws, &pblk->gen_ws_pool);
  1344. }
  1345. void pblk_line_put(struct kref *ref)
  1346. {
  1347. struct pblk_line *line = container_of(ref, struct pblk_line, ref);
  1348. struct pblk *pblk = line->pblk;
  1349. __pblk_line_put(pblk, line);
  1350. }
  1351. void pblk_line_put_wq(struct kref *ref)
  1352. {
  1353. struct pblk_line *line = container_of(ref, struct pblk_line, ref);
  1354. struct pblk *pblk = line->pblk;
  1355. struct pblk_line_ws *line_put_ws;
  1356. line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
  1357. if (!line_put_ws)
  1358. return;
  1359. line_put_ws->pblk = pblk;
  1360. line_put_ws->line = line;
  1361. line_put_ws->priv = NULL;
  1362. INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
  1363. queue_work(pblk->r_end_wq, &line_put_ws->ws);
  1364. }
  1365. int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
  1366. {
  1367. struct nvm_rq *rqd;
  1368. int err;
  1369. rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
  1370. pblk_setup_e_rq(pblk, rqd, ppa);
  1371. rqd->end_io = pblk_end_io_erase;
  1372. rqd->private = pblk;
  1373. /* The write thread schedules erases so that it minimizes disturbances
  1374. * with writes. Thus, there is no need to take the LUN semaphore.
  1375. */
  1376. err = pblk_submit_io(pblk, rqd);
  1377. if (err) {
  1378. struct nvm_tgt_dev *dev = pblk->dev;
  1379. struct nvm_geo *geo = &dev->geo;
  1380. pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
  1381. pblk_ppa_to_line(ppa),
  1382. pblk_ppa_to_pos(geo, ppa));
  1383. }
  1384. return err;
  1385. }
  1386. struct pblk_line *pblk_line_get_data(struct pblk *pblk)
  1387. {
  1388. return pblk->l_mg.data_line;
  1389. }
  1390. /* For now, always erase next line */
  1391. struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
  1392. {
  1393. return pblk->l_mg.data_next;
  1394. }
  1395. int pblk_line_is_full(struct pblk_line *line)
  1396. {
  1397. return (line->left_msecs == 0);
  1398. }
  1399. static void pblk_line_should_sync_meta(struct pblk *pblk)
  1400. {
  1401. if (pblk_rl_is_limit(&pblk->rl))
  1402. pblk_line_close_meta_sync(pblk);
  1403. }
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;
        int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt closed line %d\n", line->id);
#endif

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        kfree(line->map_bitmap);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                if (!(state & NVM_CHK_ST_OFFLINE))
                        state = NVM_CHK_ST_CLOSED;
        }

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}

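/*
 * Fill in the line's end metadata (valid sector counts, bad block bitmap,
 * write amplification counters, CRC) and queue the line for the emeta write.
 */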
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

        /* No need for an exact vsc value; avoid a big line lock and take an
         * approximate snapshot instead.
         */
        memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
        memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

        wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
        wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
        wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

        emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
        emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

        spin_lock(&l_mg->close_lock);
        spin_lock(&line->lock);

        /* Update the in-memory start address for emeta, in case it has
         * shifted due to write errors
         */
        if (line->emeta_ssec != line->cur_sec)
                line->emeta_ssec = line->cur_sec;

        list_add_tail(&line->list, &l_mg->emeta_list);
        spin_unlock(&line->lock);
        spin_unlock(&l_mg->close_lock);

        pblk_line_should_sync_meta(pblk);
}

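/*
 * Keep a private copy of the line's lba list so that GC can still recover
 * it after a write error has invalidated the emeta placement recorded in
 * smeta.
 */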
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        unsigned int lba_list_size = lm->emeta_len[2];
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
        struct pblk_emeta *emeta = line->emeta;

        w_err_gc->lba_list = pblk_malloc(lba_list_size,
                                         l_mg->emeta_alloc_type, GFP_KERNEL);
        memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
                                lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

        /* Write errors make the emeta start address stored in smeta invalid,
         * so keep a copy of the lba list until we've gc'd the line
         */
        if (w_err_gc->has_write_err)
                pblk_save_lba_list(pblk, line);

        pblk_line_close(pblk, line);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}

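/*
 * Generic helper to run per-line work on a workqueue: allocate a
 * pblk_line_ws context from the shared mempool, point it at @line and
 * @priv, and queue @work on @wq. The work handler is responsible for
 * returning the context to the pool (see pblk_line_close_ws() above).
 *
 * Illustrative call only (a sketch, not a call site in this file):
 *
 *      pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
 *                      GFP_ATOMIC, pblk->close_wq);
 */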
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                     void (*work)(struct work_struct *), gfp_t gfp_mask,
                     struct workqueue_struct *wq)
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(wq, &line_ws->ws);
}

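/*
 * Per-LUN write semaphore helpers. The semaphore bounds inflight writes to a
 * LUN (see the comment below); the 30 second down_timeout() avoids blocking
 * forever on a semaphore that is never released.
 */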
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
                             int nr_ppas, int pos)
{
        struct pblk_lun *rlun = &pblk->luns[pos];
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_PBLK_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
                                ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
        if (ret == -ETIME || ret == -EINTR)
                pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
                                -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        /* If the LUN has been locked for this same request, do not attempt
         * to lock it again
         */
        if (test_and_set_bit(pos, lun_bitmap))
                return;

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_PBLK_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
                                ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

        rlun = &pblk->luns[pos];
        up(&rlun->wr_sem);
}

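/* Release the write semaphore of every LUN marked in @lun_bitmap */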
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int num_lun = geo->all_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }
}

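/*
 * Point @lba at @ppa in the L2P table, invalidating the previous on-media
 * mapping if there was one.
 */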
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr ppa_l2p;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
                pblk_map_invalidate(pblk, ppa_l2p);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}

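/*
 * L2P update used by GC: only remap @lba to @ppa_new if the sector being
 * garbage collected is still the current mapping. Returns 1 if the map was
 * updated, 0 if the sector was overwritten in the meantime (in which case
 * it must already be marked invalid on @gc_line).
 */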
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
                       struct pblk_line *gc_line, u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa_new));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

        if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
                spin_lock(&gc_line->lock);
                WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
                                                "pblk: corrupted GC update");
                spin_unlock(&gc_line->lock);

                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa_new);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}

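/*
 * L2P update on write completion: replace the cache address stored for @lba
 * with the device address the data was written to, unless the cache entry
 * was updated in the meantime (then the freshly mapped ppa is simply
 * invalidated). Padded entries (lba == ADDR_EMPTY) only bump the pad
 * accounting and invalidate the mapped ppa.
 */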
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
                         struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
        struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
                atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                if (!pblk_ppa_empty(ppa_mapped))
                        pblk_map_invalidate(pblk, ppa_mapped);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
                if (!pblk_ppa_empty(ppa_mapped))
                        pblk_map_invalidate(pblk, ppa_mapped);
                goto out;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

        pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
        spin_unlock(&pblk->trans_lock);
}

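/*
 * Look up a contiguous lba range under trans_lock. A reference is taken on
 * every line the range maps to so the lines cannot be freed while the read
 * is in flight; callers must drop those references once the I/O completes.
 */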
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr ppa;

                ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

                /* If the L2P entry maps to a line, the reference is valid */
                if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
                        int line_id = pblk_ppa_to_line(ppa);
                        struct pblk_line *line = &pblk->lines[line_id];

                        kref_get(&line->ref);
                }
        }
        spin_unlock(&pblk->trans_lock);
}

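/*
 * Look up an arbitrary list of lbas under trans_lock; out-of-bounds lbas are
 * flagged with a warning and skipped. Unlike the sequential variant, no line
 * references are taken here.
 */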
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        u64 lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba != ADDR_EMPTY) {
                        /* logic error: lba out-of-bounds. Ignore update */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}