sysblk.c
/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3   /* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
                              * enables ~1.5M updates per sysblk unit
                              */
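
/*
 * Worked arithmetic behind the estimate above: 2 blocks * 256 pages *
 * 3000 erase cycles = 1,536,000, i.e. roughly 1.5M sysblk updates per
 * unit before its blocks wear out.
 */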

struct sysblk_scan {
        /* A row is a collection of flash blocks for a system block. */
        int nr_rows;
        int row;
        int act_blk[MAX_SYSBLKS];

        int nr_ppas;
        struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

static inline int scan_ppa_idx(int row, int blkid)
{
        return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
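
/*
 * Illustrative mapping: ppas[] is laid out row-major, one row per sysblk
 * unit with MAX_BLKS_PR_SYSBLK entries each. With the defaults (3 x 2):
 *
 *   row 0 -> ppas[0], ppas[1]
 *   row 1 -> ppas[2], ppas[3]
 *   row 2 -> ppas[4], ppas[5]
 *
 * e.g. scan_ppa_idx(1, 1) == 1 * 2 + 1 == 3.
 */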

static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
                              struct nvm_system_block *sb)
{
        info->seqnr = be32_to_cpu(sb->seqnr);
        info->erase_cnt = be32_to_cpu(sb->erase_cnt);
        info->version = be16_to_cpu(sb->version);
        strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
        info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
                              struct nvm_sb_info *info)
{
        sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
        sb->seqnr = cpu_to_be32(info->seqnr);
        sb->erase_cnt = cpu_to_be32(info->erase_cnt);
        sb->version = cpu_to_be16(info->version);
        strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
        sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}
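
/*
 * Note: the on-media representation (struct nvm_system_block) is kept
 * big-endian regardless of host byte order; the two helpers above convert
 * between it and the CPU-order struct nvm_sb_info.
 */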

static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
        struct nvm_geo *geo = &dev->geo;
        int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
        int i;

        for (i = 0; i < nr_rows; i++)
                sysblk_ppas[i].ppa = 0;

        /* if possible, place sysblk at first channel, middle channel and last
         * channel of the device. If not, create only one or two sys blocks
         */
        switch (geo->nr_chnls) {
        case 2:
                sysblk_ppas[1].g.ch = 1;
                /* fall-through */
        case 1:
                sysblk_ppas[0].g.ch = 0;
                break;
        default:
                sysblk_ppas[0].g.ch = 0;
                sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
                sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
                break;
        }

        return nr_rows;
}
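
/*
 * Example placement: on a hypothetical 8-channel device the three rows
 * land on channel 0 (first), channel 4 (8 / 2, middle) and channel 7
 * (last). Devices with one or two channels get correspondingly fewer
 * rows via the cases above.
 */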

static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
                                  struct ppa_addr *sysblk_ppas)
{
        memset(s, 0, sizeof(struct sysblk_scan));
        s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks,
                                struct sysblk_scan *s)
{
        struct ppa_addr *sppa;
        int i, blkid = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] == NVM_BLK_T_HOST)
                        return -EEXIST;

                if (blks[i] != NVM_BLK_T_FREE)
                        continue;

                sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
                sppa->g.ch = ppa.g.ch;
                sppa->g.lun = ppa.g.lun;
                sppa->g.blk = i;
                s->nr_ppas++;
                blkid++;

                pr_debug("nvm: use (%u %u %u) as sysblk\n",
                         sppa->g.ch, sppa->g.lun, sppa->g.blk);
                if (blkid > MAX_BLKS_PR_SYSBLK - 1)
                        return 0;
        }

        pr_err("nvm: sysblk failed to get free blocks\n");
        return -EINVAL;
}

static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks,
                                struct sysblk_scan *s)
{
        int i, nr_sysblk = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] != NVM_BLK_T_HOST)
                        continue;

                if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
                        pr_err("nvm: too many host blks\n");
                        return -EINVAL;
                }

                ppa.g.blk = i;

                s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
                s->nr_ppas++;
                nr_sysblk++;
        }

        return 0;
}

static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
                               struct ppa_addr *ppas, int get_free)
{
        struct nvm_geo *geo = &dev->geo;
        int i, nr_blks, ret = 0;
        u8 *blks;

        s->nr_ppas = 0;
        nr_blks = geo->blks_per_lun * geo->plane_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (i = 0; i < s->nr_rows; i++) {
                s->row = i;

                ret = nvm_get_bb_tbl(dev, ppas[i], blks);
                if (ret) {
                        pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
                               ppas[i].g.ch, ppas[i].g.blk);
                        goto err_get;
                }

                if (get_free)
                        ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
                                                   s);
                else
                        ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
                                                   s);

                if (ret)
                        goto err_get;
        }

err_get:
        kfree(blks);
        return ret;
}

/*
 * scans a block for the latest sysblk.
 * Returns:
 *      0 - newer sysblk not found. PPA is updated to latest page.
 *      1 - newer sysblk found and stored in *sblk. PPA is updated to
 *          next valid page.
 *      <0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
                          struct nvm_system_block *sblk)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_system_block *cur;
        int pg, ret, found = 0;

        /* the full buffer for a flash page is allocated. Only the first part
         * of it contains the system block information
         */
        cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
        if (!cur)
                return -ENOMEM;

        /* perform linear scan through the block */
        for (pg = 0; pg < dev->lps_per_blk; pg++) {
                ppa->g.pg = ppa_to_slc(dev, pg);

                ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
                                     cur, geo->pfpg_size);
                if (ret) {
                        if (ret == NVM_RSP_ERR_EMPTYPAGE) {
                                pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
                                         ppa->g.ch, ppa->g.lun, ppa->g.blk,
                                         ppa->g.pg);
                                break;
                        }
                        pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
                               ret, ppa->g.ch, ppa->g.lun, ppa->g.blk,
                               ppa->g.pg);
                        break; /* if we can't read a page, continue to the
                                * next blk
                                */
                }

                if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
                        pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
                                 ppa->g.ch, ppa->g.lun, ppa->g.blk,
                                 ppa->g.pg);
                        break; /* last valid page already found */
                }

                if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
                        continue;

                memcpy(sblk, cur, sizeof(struct nvm_system_block));
                found = 1;
        }

        kfree(cur);

        return found;
}
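
/*
 * Usage note: the caller seeds *sblk (typically zeroed, so any valid
 * entry wins) and the scan walks the SLC pages in order, keeping the
 * entry with the highest seqnr. The first empty or non-magic page marks
 * the write frontier, so *ppa is left at the last page inspected.
 */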

static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
                                 int type)
{
        return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
}

static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
                                struct sysblk_scan *s)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_system_block nvmsb;
        void *buf;
        int i, sect, ret = 0;
        struct ppa_addr *ppas;

        nvm_cpu_to_sysblk(&nvmsb, info);

        buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

        ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas) {
                ret = -ENOMEM;
                goto err;
        }

        /* Write and verify */
        for (i = 0; i < s->nr_rows; i++) {
                ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

                pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
                         ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk,
                         ppas[0].g.pg);

                /* Expand to all sectors within a flash page */
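                /* e.g. with sec_per_pg == 4, ppas[1..3] duplicate ppas[0] and
                 * differ only in the sector field, so one whole flash page is
                 * programmed by a single nvm_submit_ppa() call below
                 */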
                if (geo->sec_per_pg > 1) {
                        for (sect = 1; sect < geo->sec_per_pg; sect++) {
                                ppas[sect].ppa = ppas[0].ppa;
                                ppas[sect].g.sec = sect;
                        }
                }

                ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
                                     NVM_IO_SLC_MODE, buf, geo->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed program (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
                                     NVM_IO_SLC_MODE, buf, geo->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed read (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
                        pr_err("nvm: sysblk failed verify (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        ret = -EINVAL;
                        break;
                }
        }

        kfree(ppas);
err:
        kfree(buf);
        return ret;
}

static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
        int i, ret;
        unsigned long nxt_blk;
        struct ppa_addr *ppa;

        for (i = 0; i < s->nr_rows; i++) {
                nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
                ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
                ppa->g.pg = ppa_to_slc(dev, 0);

                ret = nvm_erase_ppa(dev, ppa, 1, 0);
                if (ret)
                        return ret;

                s->act_blk[i] = nxt_blk;
        }

        return 0;
}
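
/*
 * Example rotation: with MAX_BLKS_PR_SYSBLK == 2 each row ping-pongs
 * between its two blocks; rolling over from act_blk 0 erases block 1,
 * positions it at its first SLC page and makes it the active block,
 * and vice versa on the next rollover.
 */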

int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, found = 0;
        int ret = -ENOMEM;

        /*
         * 1. setup sysblk locations
         * 2. get bad block list
         * 3. filter on host-specific (type 3)
         * 4. iterate through all and find the highest seq nr.
         * 5. return superblock information
         */

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        /* no sysblocks initialized */
        if (!s.nr_ppas)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur)
                goto err_sysblk;

        /* find the latest block across all sysblocks */
        for (i = 0; i < s.nr_rows; i++) {
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

                        ret = nvm_scan_block(dev, &ppa, cur);
                        if (ret > 0)
                                found = 1;
                        else if (ret < 0)
                                break;
                }
        }

        nvm_sysblk_to_cpu(info, cur);
        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        if (found)
                return 1;
        return ret;
}

int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
        /* 1. for each latest superblock
         * 2. if room
         *    a. write new flash page entry with the updated information
         * 3. if no room
         *    a. find next available block on lun (linear search)
         *       if none, continue to next lun
         *       if none at all, report error. also report that it wasn't
         *       possible to write to all superblocks.
         *    b. write data to the new block.
         */
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, ppaidx, found = 0;
        int ret = -ENOMEM;

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur)
                goto err_sysblk;

        /* Get the latest sysblk for each sysblk row */
        for (i = 0; i < s.nr_rows; i++) {
                found = 0;
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        ppaidx = scan_ppa_idx(i, j);
                        ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
                        if (ret > 0) {
                                s.act_blk[i] = j;
                                found = 1;
                        } else if (ret < 0)
                                break;
                }
        }

        if (!found) {
                pr_err("nvm: no valid sysblks found to update\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * All sysblocks found. Check that they have the same page id in
         * their flash blocks
         */
        for (i = 1; i < s.nr_rows; i++) {
                struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
                struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

                if (l.g.pg != r.g.pg) {
                        pr_err("nvm: sysblks not on same page. Previous update failed.\n");
                        ret = -EINVAL;
                        goto err_cur;
                }
        }

        /*
         * Check that there hasn't been another update to the seqnr since we
         * began
         */
        if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
                pr_err("nvm: seq is not sequential\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * When all pages in a block have been written, a new block is
         * selected and writing is performed on the new block.
         */
        if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
            dev->lps_per_blk - 1) {
                ret = nvm_prepare_new_sysblks(dev, &s);
                if (ret)
                        goto err_cur;
        }

        ret = nvm_write_and_verify(dev, new, &s);
err_cur:
        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        return ret;
}

int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        int ret;

        /*
         * 1. select master blocks and select first available blks
         * 2. get bad block list
         * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
         * 4. write and verify data to block
         */

        if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
                return -EINVAL;

        if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
                pr_err("nvm: memory does not support SLC access\n");
                return -EINVAL;
        }

        /* Index all sysblocks and mark them as host-driven */
        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
        if (ret)
                goto err_mark;

        ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
        if (ret)
                goto err_mark;

        /* Write to the first block of each row */
        ret = nvm_write_and_verify(dev, info, &s);
err_mark:
        mutex_unlock(&dev->mlock);

        return ret;
}

static int factory_nblks(int nblks)
{
        /* Round up to nearest BITS_PER_LONG */
        return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
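
/*
 * Example: on a 64-bit build (BITS_PER_LONG == 64), factory_nblks(1020)
 * = (1020 + 63) & ~63 = 1024, so every LUN's slice of the bitmap is a
 * whole number of unsigned longs.
 */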

static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
{
        int nblks = factory_nblks(geo->blks_per_lun);

        return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
                        BITS_PER_LONG;
}
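
/*
 * Worked example with hypothetical geometry: blks_per_lun = 1020 (rounded
 * up to 1024 bits) and luns_per_chnl = 4 gives, for ch 2 / lun 1:
 * ((2 * 4 * 1024) + (1 * 1024)) / 64 = 9216 / 64 = 144, i.e. that LUN's
 * slice starts 144 longs into blk_bitmap.
 */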

static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                            u8 *blks, int nr_blks,
                            unsigned long *blk_bitmap, int flags)
{
        int i, lunoff;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        lunoff = factory_blk_offset(&dev->geo, ppa);

        /* bits left cleared correspond to blocks that must be erased */
        for (i = 0; i < nr_blks; i++) {
                switch (blks[i]) {
                case NVM_BLK_T_FREE:
                        if (flags & NVM_FACTORY_ERASE_ONLY_USER)
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_HOST:
                        if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_GRWN_BAD:
                        if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                default:
                        set_bit(i, &blk_bitmap[lunoff]);
                        break;
                }
        }

        return 0;
}
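
/*
 * Summary of the policy above (a set bit means "keep", never erase):
 *
 *   FREE      erased, unless NVM_FACTORY_ERASE_ONLY_USER is given
 *   HOST      erased only with NVM_FACTORY_RESET_HOST_BLKS
 *   GRWN_BAD  erased only with NVM_FACTORY_RESET_GRWN_BBLKS
 *   others    never erased (e.g. factory bad blocks stay untouched)
 */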

static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
                             int max_ppas, unsigned long *blk_bitmap)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
        unsigned long *offset;

        while (!done) {
                done = 1;
                nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
                        idx = factory_blk_offset(geo, ppa);
                        offset = &blk_bitmap[idx];

                        blkid = find_first_zero_bit(offset, geo->blks_per_lun);
                        if (blkid >= geo->blks_per_lun)
                                continue;
                        set_bit(blkid, offset);

                        ppa.g.blk = blkid;
                        pr_debug("nvm: erase ppa (%u %u %u)\n",
                                 ppa.g.ch, ppa.g.lun, ppa.g.blk);

                        erase_list[ppa_cnt] = ppa;
                        ppa_cnt++;
                        done = 0;

                        if (ppa_cnt == max_ppas)
                                return ppa_cnt;
                }
        }

        return ppa_cnt;
}

static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
                                int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        int ch, lun, nr_blks, ret = 0;
        u8 *blks;

        nr_blks = geo->blks_per_lun * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
                ret = nvm_get_bb_tbl(dev, ppa, blks);
                if (ret)
                        pr_err("nvm: failed bb tbl for ch%u lun%u\n",
                               ppa.g.ch, ppa.g.lun);

                ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
                                       flags);
                if (ret)
                        break;
        }

        kfree(blks);
        return ret;
}

int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppas;
        int ppa_cnt, ret = -ENOMEM;
        int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        unsigned long *blk_bitmap;

        blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
                             GFP_KERNEL);
        if (!blk_bitmap)
                return ret;

        ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas)
                goto err_blks;

        /* create list of blks to be erased */
        ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
        if (ret)
                goto err_ppas;

        /* continue erasing until the list of blks is empty */
        while ((ppa_cnt =
                        nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
                nvm_erase_ppa(dev, ppas, ppa_cnt, 0);

        /* mark host reserved blocks free */
        if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
                nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
                mutex_lock(&dev->mlock);
                ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
                if (!ret)
                        ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
                mutex_unlock(&dev->mlock);
        }
err_ppas:
        kfree(ppas);
err_blks:
        kfree(blk_bitmap);
        return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);