// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
#include "pblk-trace.h"
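
/*
 * Validate a line's end metadata (emeta): the stored CRC must match the
 * recomputed one and the header must carry the pblk magic identifier.
 */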
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}
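
/*
 * Rebuild the L2P table for a closed line from the lba list stored in its
 * emeta: empty entries are marked invalid on the line, valid ones are
 * mapped back, and the total is checked against nr_valid_lbas.
 */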
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
                         line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                pblk_alloc_page(pblk, line, pblk->min_write_pgs);
}
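
/*
 * Derive how many sectors were written to an open line by summing the
 * write pointers of its non-offline chunks, and advance the line's
 * in-memory write pointer accordingly (minus the smeta sectors).
 */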
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}

struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}

/* pad line using line bitmap. */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_sec_meta *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                goto fail_free_pad;
        }

        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_pad;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret)
                goto fail_free_rqd;

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = meta_list[i].lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, rqd->ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, rqd->ppa_list[0]);
                goto fail_free_rqd;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

        kref_put(&pad_rq->ref, pblk_recov_complete);

        if (!wait_for_completion_io_timeout(&pad_rq->wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pblk_err(pblk, "pad write timed out\n");
                ret = -ETIME;
        }

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;

fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        bio_put(bio);
fail_free_pad:
        kfree(pad_rq);
        vfree(data);
        return ret;
}
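
/*
 * Number of sectors to pad: geo->mw_cunits * geo->all_luns * geo->ws_opt,
 * capped at the sectors still left in the line.
 */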
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}
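
/*
 * Check whether the chunks of a line have diverging write pointers.
 * line_wp tracks the lowest write pointer seen so far; a chunk whose
 * write pointer is above it makes the line unbalanced.
 */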
static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_lun *rlun;
        struct nvm_chk_meta *chunk;
        struct ppa_addr ppa;
        u64 line_wp;
        int pos, i;

        rlun = &pblk->luns[0];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);
        chunk = &line->chks[pos];

        line_wp = chunk->wp;

        for (i = 1; i < lm->blk_per_line; i++) {
                rlun = &pblk->luns[i];
                ppa = rlun->bppa;
                pos = pblk_ppa_to_pos(geo, ppa);
                chunk = &line->chks[pos];

                if (chunk->wp > line_wp)
                        return 1;
                else if (chunk->wp < line_wp)
                        line_wp = chunk->wp;
        }

        return 0;
}
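
/*
 * Recover the L2P mapping of an open line by reading back the written
 * sectors and taking each lba from the out-of-band (OOB) metadata. On a
 * read error the line is padded once and the request retried.
 */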
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = 0;
        bool padded = false;
        int rq_ppas, rq_len;
        int i, j;
        int ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line);

        if (pblk_line_wp_is_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->csecs;

        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

retry_rq:
        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }

        atomic_dec(&pblk->inflight_io);

        /* If a read fails, do a best effort by padding the line and retrying */
        if (rqd->error) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret)
                        return ret;

                padded = true;
                goto retry_rq;
        }

        for (i = 0; i < rqd->nr_ppas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                lba_list[paddr++] = cpu_to_le64(lba);

                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}

/* Scan line for lbas in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

        return ret;
}

/* Insert lines ordered by sequence number (seq_num) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}
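
/*
 * Walk back from the end of the line, skipping bad blocks, to find the
 * sector at which the line's emeta region starts.
 */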
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}

static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                          header->version_minor);
#endif

        return 0;
}
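
/*
 * Restore the write amplification counters (user, pad, gc) saved in
 * emeta, both as the running atomics and as the reset baselines.
 */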
static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);
                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}
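
/*
 * A line counts as written if it is not bad and the chunk holding its
 * first smeta block is no longer in the free state.
 */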
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_FREE)
                return 0;

        return 1;
}

static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}
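
/*
 * Scan-based recovery of the L2P table: read smeta from every written
 * line, order the lines by sequence number, then rebuild the mapping
 * from emeta for closed lines or from the OOB area for open ones. The
 * last open line, if any, is returned so writing can resume on it.
 */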
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed as not written here */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                 smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
                        valid_uuid = 1;
                }

                if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                   i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                           line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                pblk_setup_uuid(pblk);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                 &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                              line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                              line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                 &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
                pblk_line_replace_data(pblk);
        } else {
                spin_lock(&l_mg->free_lock);
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                         found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad current line
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}