/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */

struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;
	int row;
	int act_blk[MAX_SYSBLKS];

	int nr_ppas;
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

static inline int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
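
/*
 * The system block is stored on media in big-endian format. The two helpers
 * below convert between the on-media layout (struct nvm_system_block) and
 * the CPU-native representation (struct nvm_sb_info).
 */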
void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
	info->seqnr = be32_to_cpu(sb->seqnr);
	info->erase_cnt = be32_to_cpu(sb->erase_cnt);
	info->version = be16_to_cpu(sb->version);
	strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
	info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
	sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
	sb->seqnr = cpu_to_be32(info->seqnr);
	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
	sb->version = cpu_to_be16(info->version);
	strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
	sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
	int i;

	for (i = 0; i < nr_rows; i++)
		sysblk_ppas[i].ppa = 0;

	/* if possible, place sysblk at first channel, middle channel and last
	 * channel of the device. If not, create only one or two sys blocks
	 */
	switch (dev->nr_chnls) {
	case 2:
		sysblk_ppas[1].g.ch = 1;
		/* fall-through */
	case 1:
		sysblk_ppas[0].g.ch = 0;
		break;
	default:
		sysblk_ppas[0].g.ch = 0;
		sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
		sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
		break;
	}

	return nr_rows;
}

void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
						struct ppa_addr *sysblk_ppas)
{
	memset(s, 0, sizeof(struct sysblk_scan));
	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
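
/*
 * Walk the (folded) bad block table of a LUN and record up to
 * MAX_BLKS_PR_SYSBLK free blocks in the current scan row. Returns -EEXIST
 * if a host-reserved block is already present, and -EINVAL if not enough
 * free blocks could be found.
 */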
static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
					u8 *blks, int nr_blks,
					struct sysblk_scan *s)
{
	struct ppa_addr *sppa;
	int i, blkid = 0;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_HOST)
			return -EEXIST;

		if (blks[i] != NVM_BLK_T_FREE)
			continue;

		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
		sppa->g.ch = ppa.g.ch;
		sppa->g.lun = ppa.g.lun;
		sppa->g.blk = i;
		s->nr_ppas++;
		blkid++;

		pr_debug("nvm: use (%u %u %u) as sysblk\n",
					sppa->g.ch, sppa->g.lun, sppa->g.blk);
		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
			return 0;
	}

	pr_err("nvm: sysblk failed get sysblk\n");
	return -EINVAL;
}
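
/*
 * Collect the blocks in a LUN that are already marked host-reserved and
 * store their addresses in the current scan row. Fails with -EINVAL if more
 * host blocks are found than the scan structure can hold.
 */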
static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
					u8 *blks, int nr_blks,
					struct sysblk_scan *s)
{
	int i, nr_sysblk = 0;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] != NVM_BLK_T_HOST)
			continue;

		if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
			pr_err("nvm: too many host blks\n");
			return -EINVAL;
		}

		ppa.g.blk = i;

		s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
		s->nr_ppas++;
		nr_sysblk++;
	}

	return 0;
}
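
/*
 * Read the bad block table for every sysblk row and populate the scan
 * structure, either with free blocks (get_free set, used when initializing
 * a device) or with the blocks already reserved for the host.
 */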
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
				struct ppa_addr *ppas, int get_free)
{
	int i, nr_blks, ret = 0;
	u8 *blks;

	s->nr_ppas = 0;
	nr_blks = dev->blks_per_lun * dev->plane_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (i = 0; i < s->nr_rows; i++) {
		s->row = i;

		ret = nvm_get_bb_tbl(dev, ppas[i], blks);
		if (ret) {
			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
							ppas[i].g.ch,
							ppas[i].g.blk);
			goto err_get;
		}

		if (get_free)
			ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
									s);
		else
			ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
									s);

		if (ret)
			goto err_get;
	}

err_get:
	kfree(blks);
	return ret;
}

/*
 * scans a block for the latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *sblk. PPA is updated to
 *	    next valid page.
 *	<0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
						struct nvm_system_block *sblk)
{
	struct nvm_system_block *cur;
	int pg, ret, found = 0;

	/* the full buffer for a flash page is allocated. Only the first part
	 * of it contains the system block information
	 */
	cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
	if (!cur)
		return -ENOMEM;

	/* perform linear scan through the block */
	for (pg = 0; pg < dev->lps_per_blk; pg++) {
		ppa->g.pg = ppa_to_slc(dev, pg);

		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
							cur, dev->pfpg_size);
		if (ret) {
			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
				break;
			}
			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
							ret,
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
			break; /* if we can't read a page, continue to the
				* next blk
				*/
		}

		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
			break; /* last valid page already found */
		}

		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
			continue;

		memcpy(sblk, cur, sizeof(struct nvm_system_block));
		found = 1;
	}

	kfree(cur);

	return found;
}
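
/*
 * Mark every block collected in the scan structure with the given block
 * type (e.g. NVM_BLK_T_HOST or NVM_BLK_T_FREE) through the device's bad
 * block table interface. All blocks must fit in a single command so the
 * update happens atomically.
 */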
static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
	struct nvm_rq rqd;
	int ret;

	if (s->nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
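
/*
 * Write the system block image derived from @info to the active block of
 * every sysblk row, then read the page back and compare it against the
 * original image to verify the program operation.
 */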
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
							struct sysblk_scan *s)
{
	struct nvm_system_block nvmsb;
	void *buf;
	int i, sect, ret = 0;
	struct ppa_addr *ppas;

	nvm_cpu_to_sysblk(&nvmsb, info);

	buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

	ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas) {
		ret = -ENOMEM;
		goto err;
	}

	/* Write and verify */
	for (i = 0; i < s->nr_rows; i++) {
		ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

		pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
							ppas[0].g.ch,
							ppas[0].g.lun,
							ppas[0].g.blk,
							ppas[0].g.pg);

		/* Expand to all sectors within a flash page */
		if (dev->sec_per_pg > 1) {
			for (sect = 1; sect < dev->sec_per_pg; sect++) {
				ppas[sect].ppa = ppas[0].ppa;
				ppas[sect].g.sec = sect;
			}
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
		if (ret) {
			pr_err("nvm: sysblk failed program (%u %u %u)\n",
							ppas[0].g.ch,
							ppas[0].g.lun,
							ppas[0].g.blk);
			break;
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
		if (ret) {
			pr_err("nvm: sysblk failed read (%u %u %u)\n",
							ppas[0].g.ch,
							ppas[0].g.lun,
							ppas[0].g.blk);
			break;
		}

		if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
			pr_err("nvm: sysblk failed verify (%u %u %u)\n",
							ppas[0].g.ch,
							ppas[0].g.lun,
							ppas[0].g.blk);
			ret = -EINVAL;
			break;
		}
	}

	kfree(ppas);
err:
	kfree(buf);

	return ret;
}
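
/*
 * For each sysblk row, erase the next block in the rotation and make it the
 * new active block. Used by nvm_update_sysblock() when the current active
 * blocks have no pages left to write.
 */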
static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
	int i, ret;
	unsigned long nxt_blk;
	struct ppa_addr *ppa;

	for (i = 0; i < s->nr_rows; i++) {
		nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
		ppa->g.pg = ppa_to_slc(dev, 0);

		ret = nvm_erase_ppa(dev, ppa, 1);
		if (ret)
			return ret;

		s->act_blk[i] = nxt_blk;
	}

	return 0;
}
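
/*
 * Locate the most recent system block on the device and return its contents
 * in @info. Returns 1 if a system block was found, 0 if the device holds no
 * system blocks, or a negative error code.
 */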
int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, found = 0;
	int ret = -ENOMEM;

	/*
	 * 1. setup sysblk locations
	 * 2. get bad block list
	 * 3. filter on host-specific (type 3)
	 * 4. iterate through all and find the highest seq nr.
	 * 5. return superblock information
	 */

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
	if (ret)
		goto err_sysblk;

	/* no sysblocks initialized */
	if (!s.nr_ppas)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur)
		goto err_sysblk;

	/* find the latest block across all sysblocks */
	for (i = 0; i < s.nr_rows; i++) {
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

			ret = nvm_scan_block(dev, &ppa, cur);
			if (ret > 0)
				found = 1;
			else if (ret < 0)
				break;
		}
	}

	nvm_sysblk_to_cpu(info, cur);
	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	if (found)
		return 1;
	return ret;
}

int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
	/* 1. for each latest superblock
	 * 2. if room
	 *    a. write new flash page entry with the updated information
	 * 3. if no room
	 *    a. find next available block on lun (linear search)
	 *       if none, continue to next lun
	 *       if none at all, report error. also report that it wasn't
	 *       possible to write to all superblocks.
	 *    b. write data to block.
	 */
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, ppaidx, found = 0;
	int ret = -ENOMEM;

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
	if (ret)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur)
		goto err_sysblk;

	/* Get the latest sysblk for each sysblk row */
	for (i = 0; i < s.nr_rows; i++) {
		found = 0;
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			ppaidx = scan_ppa_idx(i, j);
			ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
			if (ret > 0) {
				s.act_blk[i] = j;
				found = 1;
			} else if (ret < 0)
				break;
		}
	}

	if (!found) {
		pr_err("nvm: no valid sysblks found to update\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * All sysblocks found. Check that they have the same page id in their
	 * flash blocks
	 */
	for (i = 1; i < s.nr_rows; i++) {
		struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
		struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

		if (l.g.pg != r.g.pg) {
			pr_err("nvm: sysblks not on same page. Previous update failed.\n");
			ret = -EINVAL;
			goto err_cur;
		}
	}

	/*
	 * Check that there hasn't been another update to the seqnr since we
	 * began
	 */
	if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
		pr_err("nvm: seq is not sequential\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * When all pages in a block have been written, a new block is selected
	 * and writing is performed on the new block.
	 */
	if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
						dev->lps_per_blk - 1) {
		ret = nvm_prepare_new_sysblks(dev, &s);
		if (ret)
			goto err_cur;
	}

	ret = nvm_write_and_verify(dev, new, &s);
err_cur:
	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	return ret;
}
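
/*
 * Initialize the system blocks on a device: reserve the selected blocks as
 * host-managed in the bad block table and write the initial system block
 * content taken from @info.
 */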
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	int ret;

	/*
	 * 1. select master blocks and select first available blks
	 * 2. get bad block list
	 * 3. mark MAX_SYSBLKS block as host-based device allocated.
	 * 4. write and verify data to block
	 */

	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
		return -EINVAL;

	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
		pr_err("nvm: memory does not support SLC access\n");
		return -EINVAL;
	}

	/* Index all sysblocks and mark them as host-driven */
	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
	if (ret)
		goto err_mark;

	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
	if (ret)
		goto err_mark;

	/* Write to the first block of each row */
	ret = nvm_write_and_verify(dev, info, &s);
err_mark:
	mutex_unlock(&dev->mlock);

	return ret;
}
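
/*
 * Factory reset keeps one bitmap per LUN of the blocks that should be left
 * untouched. Each per-LUN bitmap is rounded up to a multiple of
 * BITS_PER_LONG, and factory_blk_offset() returns the word index of a LUN's
 * bitmap within the shared blk_bitmap array.
 */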
static int factory_nblks(int nblks)
{
	/* Round up to nearest BITS_PER_LONG */
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
	int nblks = factory_nblks(dev->blks_per_lun);

	return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
								BITS_PER_LONG;
}
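
/*
 * Fold the bad block table of a LUN and mark, in the LUN's bitmap, the
 * blocks that must be preserved during factory reset. Which block types are
 * preserved depends on the NVM_FACTORY_* flags.
 */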
static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
					u8 *blks, int nr_blks,
					unsigned long *blk_bitmap, int flags)
{
	int i, lunoff;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lunoff = factory_blk_offset(dev, ppa);

	/* unset bits correspond to blocks that must be erased */
	for (i = 0; i < nr_blks; i++) {
		switch (blks[i]) {
		case NVM_BLK_T_FREE:
			if (flags & NVM_FACTORY_ERASE_ONLY_USER)
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		case NVM_BLK_T_HOST:
			if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		case NVM_BLK_T_GRWN_BAD:
			if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		default:
			set_bit(i, &blk_bitmap[lunoff]);
			break;
		}
	}

	return 0;
}
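
/*
 * Gather up to max_ppas block addresses that still need to be erased,
 * picking one block per LUN per pass so the erases are spread across the
 * device. Returns the number of addresses placed in erase_list.
 */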
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
					int max_ppas, unsigned long *blk_bitmap)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
			idx = factory_blk_offset(dev, ppa);
			offset = &blk_bitmap[idx];

			blkid = find_first_zero_bit(offset,
						dev->blks_per_lun);
			if (blkid >= dev->blks_per_lun)
				continue;
			set_bit(blkid, offset);

			ppa.g.blk = blkid;
			pr_debug("nvm: erase ppa (%u %u %u)\n",
							ppa.g.ch,
							ppa.g.lun,
							ppa.g.blk);

			erase_list[ppa_cnt] = ppa;
			ppa_cnt++;
			done = 0;

			if (ppa_cnt == max_ppas)
				return ppa_cnt;
		}
	}

	return ppa_cnt;
}
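
/*
 * Build the per-LUN bitmaps of blocks to preserve by reading the bad block
 * table of every LUN and applying the factory flags.
 */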
static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
								int flags)
{
	struct ppa_addr ppa;
	int ch, lun, nr_blks, ret = 0;
	u8 *blks;

	nr_blks = dev->blks_per_lun * dev->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
		ret = nvm_get_bb_tbl(dev, ppa, blks);
		if (ret)
			pr_err("nvm: failed bb tbl for ch%u lun%u\n",
							ppa.g.ch, ppa.g.lun);

		ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
									flags);
		if (ret)
			break;
	}

	kfree(blks);
	return ret;
}
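
/*
 * Reset a device towards its factory state: erase every block that is not
 * excluded by the factory flags and, when NVM_FACTORY_RESET_HOST_BLKS is
 * set, return the host-reserved system blocks to the free state.
 */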
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	unsigned long *blk_bitmap;

	blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
								GFP_KERNEL);
	if (!blk_bitmap)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
	if (ret)
		goto err_ppas;

	/* continue to erase until the list of blks is empty */
	while ((ppa_cnt =
			nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(blk_bitmap);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);