/* sysblk.c */
/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */
#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */

struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;		/* number of sysblk rows on this device (<= MAX_SYSBLKS) */
	int row;		/* row currently being filled by a bb-table callback */
	int act_blk[MAX_SYSBLKS];	/* per row: index of the active (latest) block */

	int nr_ppas;		/* total ppas collected across all rows */
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};
  32. static inline int scan_ppa_idx(int row, int blkid)
  33. {
  34. return (row * MAX_BLKS_PR_SYSBLK) + blkid;
  35. }
  36. void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
  37. {
  38. info->seqnr = be32_to_cpu(sb->seqnr);
  39. info->erase_cnt = be32_to_cpu(sb->erase_cnt);
  40. info->version = be16_to_cpu(sb->version);
  41. strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
  42. info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
  43. }
  44. void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
  45. {
  46. sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
  47. sb->seqnr = cpu_to_be32(info->seqnr);
  48. sb->erase_cnt = cpu_to_be32(info->erase_cnt);
  49. sb->version = cpu_to_be16(info->version);
  50. strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
  51. sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
  52. }
  53. static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
  54. {
  55. int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
  56. int i;
  57. for (i = 0; i < nr_rows; i++)
  58. sysblk_ppas[i].ppa = 0;
  59. /* if possible, place sysblk at first channel, middle channel and last
  60. * channel of the device. If not, create only one or two sys blocks
  61. */
  62. switch (dev->nr_chnls) {
  63. case 2:
  64. sysblk_ppas[1].g.ch = 1;
  65. /* fall-through */
  66. case 1:
  67. sysblk_ppas[0].g.ch = 0;
  68. break;
  69. default:
  70. sysblk_ppas[0].g.ch = 0;
  71. sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
  72. sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
  73. break;
  74. }
  75. return nr_rows;
  76. }
  77. void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
  78. struct ppa_addr *sysblk_ppas)
  79. {
  80. memset(s, 0, sizeof(struct sysblk_scan));
  81. s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
  82. }
  83. static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
  84. void *private)
  85. {
  86. struct sysblk_scan *s = private;
  87. int i, nr_sysblk = 0;
  88. for (i = 0; i < nr_blks; i++) {
  89. if (blks[i] != NVM_BLK_T_HOST)
  90. continue;
  91. if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
  92. pr_err("nvm: too many host blks\n");
  93. return -EINVAL;
  94. }
  95. ppa.g.blk = i;
  96. s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
  97. s->nr_ppas++;
  98. nr_sysblk++;
  99. }
  100. return 0;
  101. }
  102. static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
  103. struct ppa_addr *ppas, nvm_bb_update_fn *fn)
  104. {
  105. struct ppa_addr dppa;
  106. int i, ret;
  107. s->nr_ppas = 0;
  108. for (i = 0; i < s->nr_rows; i++) {
  109. dppa = generic_to_dev_addr(dev, ppas[i]);
  110. s->row = i;
  111. ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
  112. if (ret) {
  113. pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
  114. ppas[i].g.ch,
  115. ppas[i].g.blk);
  116. return ret;
  117. }
  118. }
  119. return ret;
  120. }
/*
 * scans a block for latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *cur. PPA is updated to
 *	    next valid page.
 *	<0- error.
 *
 * NOTE(review): read failures inside the loop break out and return
 * `found` (0 or 1), not a negative error - only the allocation failure
 * path actually returns <0. Confirm whether the header comment or the
 * code reflects the intended contract.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
			  struct nvm_system_block *sblk)
{
	struct nvm_system_block *cur;
	int pg, cursz, ret, found = 0;

	/* the full buffer for a flash page is allocated. Only the first of it
	 * contains the system block information
	 */
	cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
	cur = kmalloc(cursz, GFP_KERNEL);
	if (!cur)
		return -ENOMEM;

	/* perform linear scan through the block */
	for (pg = 0; pg < dev->lps_per_blk; pg++) {
		/* sysblks are written in SLC mode; map to the SLC page id */
		ppa->g.pg = ppa_to_slc(dev, pg);

		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
				     cur, cursz);
		if (ret) {
			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
				/* first never-written page: scan complete,
				 * ppa->g.pg is the next writable page
				 */
				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
					 ppa->g.ch,
					 ppa->g.lun,
					 ppa->g.blk,
					 ppa->g.pg);
				break;
			}
			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
			       ret,
			       ppa->g.ch,
			       ppa->g.lun,
			       ppa->g.blk,
			       ppa->g.pg);
			break; /* if we can't read a page, continue to the
				* next blk
				*/
		}

		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
			/* page is written but holds no sysblk image */
			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
				 ppa->g.ch,
				 ppa->g.lun,
				 ppa->g.blk,
				 ppa->g.pg);
			break; /* last valid page already found */
		}

		/* keep only images newer than what the caller already has */
		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
			continue;

		memcpy(sblk, cur, sizeof(struct nvm_system_block));
		found = 1;
	}

	kfree(cur);

	return found;
}
  181. static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
  182. {
  183. struct nvm_rq rqd;
  184. int ret;
  185. if (s->nr_ppas > dev->ops->max_phys_sect) {
  186. pr_err("nvm: unable to update all sysblocks atomically\n");
  187. return -EINVAL;
  188. }
  189. memset(&rqd, 0, sizeof(struct nvm_rq));
  190. nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
  191. nvm_generic_to_addr_mode(dev, &rqd);
  192. ret = dev->ops->set_bb_tbl(dev, &rqd, type);
  193. nvm_free_rqd_ppalist(dev, &rqd);
  194. if (ret) {
  195. pr_err("nvm: sysblk failed bb mark\n");
  196. return -EINVAL;
  197. }
  198. return 0;
  199. }
  200. static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
  201. void *private)
  202. {
  203. struct sysblk_scan *s = private;
  204. struct ppa_addr *sppa;
  205. int i, blkid = 0;
  206. for (i = 0; i < nr_blks; i++) {
  207. if (blks[i] == NVM_BLK_T_HOST)
  208. return -EEXIST;
  209. if (blks[i] != NVM_BLK_T_FREE)
  210. continue;
  211. sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
  212. sppa->g.ch = ppa.g.ch;
  213. sppa->g.lun = ppa.g.lun;
  214. sppa->g.blk = i;
  215. s->nr_ppas++;
  216. blkid++;
  217. pr_debug("nvm: use (%u %u %u) as sysblk\n",
  218. sppa->g.ch, sppa->g.lun, sppa->g.blk);
  219. if (blkid > MAX_BLKS_PR_SYSBLK - 1)
  220. return 0;
  221. }
  222. pr_err("nvm: sysblk failed get sysblk\n");
  223. return -EINVAL;
  224. }
  225. static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
  226. struct sysblk_scan *s)
  227. {
  228. struct nvm_system_block nvmsb;
  229. void *buf;
  230. int i, sect, ret, bufsz;
  231. struct ppa_addr *ppas;
  232. nvm_cpu_to_sysblk(&nvmsb, info);
  233. /* buffer for flash page */
  234. bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
  235. buf = kzalloc(bufsz, GFP_KERNEL);
  236. if (!buf)
  237. return -ENOMEM;
  238. memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
  239. ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
  240. if (!ppas) {
  241. ret = -ENOMEM;
  242. goto err;
  243. }
  244. /* Write and verify */
  245. for (i = 0; i < s->nr_rows; i++) {
  246. ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
  247. pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
  248. ppas[0].g.ch,
  249. ppas[0].g.lun,
  250. ppas[0].g.blk,
  251. ppas[0].g.pg);
  252. /* Expand to all sectors within a flash page */
  253. if (dev->sec_per_pg > 1) {
  254. for (sect = 1; sect < dev->sec_per_pg; sect++) {
  255. ppas[sect].ppa = ppas[0].ppa;
  256. ppas[sect].g.sec = sect;
  257. }
  258. }
  259. ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
  260. NVM_IO_SLC_MODE, buf, bufsz);
  261. if (ret) {
  262. pr_err("nvm: sysblk failed program (%u %u %u)\n",
  263. ppas[0].g.ch,
  264. ppas[0].g.lun,
  265. ppas[0].g.blk);
  266. break;
  267. }
  268. ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
  269. NVM_IO_SLC_MODE, buf, bufsz);
  270. if (ret) {
  271. pr_err("nvm: sysblk failed read (%u %u %u)\n",
  272. ppas[0].g.ch,
  273. ppas[0].g.lun,
  274. ppas[0].g.blk);
  275. break;
  276. }
  277. if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
  278. pr_err("nvm: sysblk failed verify (%u %u %u)\n",
  279. ppas[0].g.ch,
  280. ppas[0].g.lun,
  281. ppas[0].g.blk);
  282. ret = -EINVAL;
  283. break;
  284. }
  285. }
  286. kfree(ppas);
  287. err:
  288. kfree(buf);
  289. return ret;
  290. }
  291. static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
  292. {
  293. int i, ret;
  294. unsigned long nxt_blk;
  295. struct ppa_addr *ppa;
  296. for (i = 0; i < s->nr_rows; i++) {
  297. nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
  298. ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
  299. ppa->g.pg = ppa_to_slc(dev, 0);
  300. ret = nvm_erase_ppa(dev, ppa, 1);
  301. if (ret)
  302. return ret;
  303. s->act_blk[i] = nxt_blk;
  304. }
  305. return 0;
  306. }
  307. int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
  308. {
  309. struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
  310. struct sysblk_scan s;
  311. struct nvm_system_block *cur;
  312. int i, j, found = 0;
  313. int ret = -ENOMEM;
  314. /*
  315. * 1. setup sysblk locations
  316. * 2. get bad block list
  317. * 3. filter on host-specific (type 3)
  318. * 4. iterate through all and find the highest seq nr.
  319. * 5. return superblock information
  320. */
  321. if (!dev->ops->get_bb_tbl)
  322. return -EINVAL;
  323. nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
  324. mutex_lock(&dev->mlock);
  325. ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
  326. if (ret)
  327. goto err_sysblk;
  328. /* no sysblocks initialized */
  329. if (!s.nr_ppas)
  330. goto err_sysblk;
  331. cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
  332. if (!cur)
  333. goto err_sysblk;
  334. /* find the latest block across all sysblocks */
  335. for (i = 0; i < s.nr_rows; i++) {
  336. for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
  337. struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
  338. ret = nvm_scan_block(dev, &ppa, cur);
  339. if (ret > 0)
  340. found = 1;
  341. else if (ret < 0)
  342. break;
  343. }
  344. }
  345. nvm_sysblk_to_cpu(info, cur);
  346. kfree(cur);
  347. err_sysblk:
  348. mutex_unlock(&dev->mlock);
  349. if (found)
  350. return 1;
  351. return ret;
  352. }
  353. int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
  354. {
  355. /* 1. for each latest superblock
  356. * 2. if room
  357. * a. write new flash page entry with the updated information
  358. * 3. if no room
  359. * a. find next available block on lun (linear search)
  360. * if none, continue to next lun
  361. * if none at all, report error. also report that it wasn't
  362. * possible to write to all superblocks.
  363. * c. write data to block.
  364. */
  365. struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
  366. struct sysblk_scan s;
  367. struct nvm_system_block *cur;
  368. int i, j, ppaidx, found = 0;
  369. int ret = -ENOMEM;
  370. if (!dev->ops->get_bb_tbl)
  371. return -EINVAL;
  372. nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
  373. mutex_lock(&dev->mlock);
  374. ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
  375. if (ret)
  376. goto err_sysblk;
  377. cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
  378. if (!cur)
  379. goto err_sysblk;
  380. /* Get the latest sysblk for each sysblk row */
  381. for (i = 0; i < s.nr_rows; i++) {
  382. found = 0;
  383. for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
  384. ppaidx = scan_ppa_idx(i, j);
  385. ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
  386. if (ret > 0) {
  387. s.act_blk[i] = j;
  388. found = 1;
  389. } else if (ret < 0)
  390. break;
  391. }
  392. }
  393. if (!found) {
  394. pr_err("nvm: no valid sysblks found to update\n");
  395. ret = -EINVAL;
  396. goto err_cur;
  397. }
  398. /*
  399. * All sysblocks found. Check that they have same page id in their flash
  400. * blocks
  401. */
  402. for (i = 1; i < s.nr_rows; i++) {
  403. struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
  404. struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
  405. if (l.g.pg != r.g.pg) {
  406. pr_err("nvm: sysblks not on same page. Previous update failed.\n");
  407. ret = -EINVAL;
  408. goto err_cur;
  409. }
  410. }
  411. /*
  412. * Check that there haven't been another update to the seqnr since we
  413. * began
  414. */
  415. if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
  416. pr_err("nvm: seq is not sequential\n");
  417. ret = -EINVAL;
  418. goto err_cur;
  419. }
  420. /*
  421. * When all pages in a block has been written, a new block is selected
  422. * and writing is performed on the new block.
  423. */
  424. if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
  425. dev->lps_per_blk - 1) {
  426. ret = nvm_prepare_new_sysblks(dev, &s);
  427. if (ret)
  428. goto err_cur;
  429. }
  430. ret = nvm_write_and_verify(dev, new, &s);
  431. err_cur:
  432. kfree(cur);
  433. err_sysblk:
  434. mutex_unlock(&dev->mlock);
  435. return ret;
  436. }
  437. int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
  438. {
  439. struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
  440. struct sysblk_scan s;
  441. int ret;
  442. /*
  443. * 1. select master blocks and select first available blks
  444. * 2. get bad block list
  445. * 3. mark MAX_SYSBLKS block as host-based device allocated.
  446. * 4. write and verify data to block
  447. */
  448. if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
  449. return -EINVAL;
  450. if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
  451. pr_err("nvm: memory does not support SLC access\n");
  452. return -EINVAL;
  453. }
  454. /* Index all sysblocks and mark them as host-driven */
  455. nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
  456. mutex_lock(&dev->mlock);
  457. ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
  458. if (ret)
  459. goto err_mark;
  460. ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
  461. if (ret)
  462. goto err_mark;
  463. /* Write to the first block of each row */
  464. ret = nvm_write_and_verify(dev, info, &s);
  465. err_mark:
  466. mutex_unlock(&dev->mlock);
  467. return ret;
  468. }
/* State carried through a factory reset. */
struct factory_blks {
	struct nvm_dev *dev;	/* device being reset */
	int flags;		/* NVM_FACTORY_* selection flags */
	/* one bitmap run per (ch, lun); a set bit marks a block that must
	 * NOT be erased (or that has already been claimed for erase)
	 */
	unsigned long *blks;
};
/* Round a per-lun block count up to a BITS_PER_LONG multiple, so each
 * lun's bitmap in factory_blks::blks occupies a whole number of longs.
 */
static int factory_nblks(int nblks)
{
	/* Round up to nearest BITS_PER_LONG */
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
  479. static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
  480. {
  481. int nblks = factory_nblks(dev->blks_per_lun);
  482. return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
  483. BITS_PER_LONG;
  484. }
  485. static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
  486. void *private)
  487. {
  488. struct factory_blks *f = private;
  489. struct nvm_dev *dev = f->dev;
  490. int i, lunoff;
  491. lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
  492. /* non-set bits correspond to the block must be erased */
  493. for (i = 0; i < nr_blks; i++) {
  494. switch (blks[i]) {
  495. case NVM_BLK_T_FREE:
  496. if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
  497. set_bit(i, &f->blks[lunoff]);
  498. break;
  499. case NVM_BLK_T_HOST:
  500. if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
  501. set_bit(i, &f->blks[lunoff]);
  502. break;
  503. case NVM_BLK_T_GRWN_BAD:
  504. if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
  505. set_bit(i, &f->blks[lunoff]);
  506. break;
  507. default:
  508. set_bit(i, &f->blks[lunoff]);
  509. break;
  510. }
  511. }
  512. return 0;
  513. }
/*
 * Move up to @max_ppas blocks from the pending-erase bitmaps in @f to
 * @erase_list. Blocks are picked round-robin - one per (ch, lun) per
 * pass - so consecutive erases target different parallel units. Each
 * claimed block has its bit set, so later calls resume where this one
 * stopped.
 *
 * Returns the number of ppas written to @erase_list; 0 once all bitmaps
 * are exhausted.
 */
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
			     int max_ppas, struct factory_blks *f)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		for (ch = 0; ch < dev->nr_chnls; ch++) {
			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
				idx = factory_blk_offset(dev, ch, lun);
				offset = &f->blks[idx];

				/* zero bit = block still pending erase */
				blkid = find_first_zero_bit(offset,
							    dev->blks_per_lun);
				if (blkid >= dev->blks_per_lun)
					continue;	/* lun exhausted */
				set_bit(blkid, offset);	/* claim block */

				ppa.ppa = 0;
				ppa.g.ch = ch;
				ppa.g.lun = lun;
				ppa.g.blk = blkid;
				pr_debug("nvm: erase ppa (%u %u %u)\n",
					 ppa.g.ch,
					 ppa.g.lun,
					 ppa.g.blk);

				erase_list[ppa_cnt] = ppa;
				ppa_cnt++;
				/* found work this pass; run another pass */
				done = 0;

				if (ppa_cnt == max_ppas)
					return ppa_cnt;
			}
		}
	}

	return ppa_cnt;
}
  549. static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
  550. nvm_bb_update_fn *fn, void *priv)
  551. {
  552. struct ppa_addr dev_ppa;
  553. int ret;
  554. dev_ppa = generic_to_dev_addr(dev, ppa);
  555. ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
  556. if (ret)
  557. pr_err("nvm: failed bb tbl for ch%u lun%u\n",
  558. ppa.g.ch, ppa.g.blk);
  559. return ret;
  560. }
  561. static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
  562. {
  563. int ch, lun, ret;
  564. struct ppa_addr ppa;
  565. ppa.ppa = 0;
  566. for (ch = 0; ch < dev->nr_chnls; ch++) {
  567. for (lun = 0; lun < dev->luns_per_chnl; lun++) {
  568. ppa.g.ch = ch;
  569. ppa.g.lun = lun;
  570. ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
  571. f);
  572. if (ret)
  573. return ret;
  574. }
  575. }
  576. return 0;
  577. }
/*
 * Factory-reset the device: erase every block selected by @flags and,
 * when NVM_FACTORY_RESET_HOST_BLKS is set, return host-reserved
 * (sysblk) blocks to the free pool.
 *
 * Returns 0 on success or a negative errno.
 */
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct factory_blks f;
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	/* size the erase list so one submission fits the device limit */
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;

	/* NOTE(review): factory_nblks() returns a bit count, yet it is used
	 * here as a byte count - allocates 8x the needed space. Harmless
	 * over-allocation, but confirm before changing.
	 */
	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
			 GFP_KERNEL);
	if (!f.blks)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	f.dev = dev;
	f.flags = flags;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, &f);
	if (ret)
		goto err_ppas;

	/* continue to erase until list of blks until empty */
	/* NOTE(review): nvm_erase_ppa()'s return value is ignored here -
	 * erase failures are silently skipped; verify this is intended.
	 */
	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
					  sysblk_get_host_blks);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(f.blks);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);