/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* the target type list is protected by nvm_tgtt_lock */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
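
/*
 * Usage sketch (illustrative only, not part of this file): a target module
 * registers its nvm_tgt_type on module init and removes it on exit. The
 * names below are hypothetical.
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example_tgt",
 *		.version	= {1, 0, 0},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return nvm_register_tgt_type(&tt_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		nvm_unregister_tgt_type(&tt_example);
 *	}
 */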
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}
int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr on any device that has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
					 struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++) {
			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
					rqd->ppa_list[i]);
		}
	} else {
		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
						TRANS_TGT_TO_DEV);
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int type)
{
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_bb_tbl);

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->get_area(dev, lba, len);
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
{
	struct nvm_dev *dev = tgt_dev->parent;

	dev->mt->put_area(dev, lba);
}
EXPORT_SYMBOL(nvm_put_area);
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_geo *geo = &dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
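
/*
 * Worked example (illustrative): with vblk set, plane_mode == NVM_PLANE_DOUBLE
 * and two input ppas {A, B}, the list above is laid out plane-major:
 *
 *	ppa_list[0] = A (pl 0)	ppa_list[2] = A (pl 1)
 *	ppa_list[1] = B (pl 0)	ppa_list[3] = B (pl 1)
 *
 * i.e. index = (pl_idx * nr_ppas) + i, and rqd->nr_ppas becomes 4.
 */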
void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int flags)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = NULL;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 *			 must take care to free the ppa list if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 *		    as single, dual, quad plane PPAs depending on device type.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
		   int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
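
/*
 * Usage sketch (illustrative; the address values are hypothetical): a caller
 * with a kernel buffer can read one flash page synchronously like this.
 *
 *	struct ppa_addr ppa = { .ppa = 0 };
 *	int err;
 *
 *	ppa.g.blk = 4;
 *	ppa.g.pg = 0;
 *
 *	err = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf,
 *							geo->pfpg_size);
 *
 * The call blocks in __nvm_submit_ppa() until nvm_end_io_sync() completes
 * the request, and returns rqd->error.
 */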
/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If any of the planes is marked bad or grown bad, the virtual block is
 * marked bad. Otherwise, the state of the first plane acts as the block
 * state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
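
/*
 * Worked example (illustrative): with plane_mode == 2 and blks_per_lun == 2,
 * a per-plane table {good, bad, good, good} covers block 0 (entries 0-1) and
 * block 1 (entries 2-3). Block 0 folds to bad because one of its planes is
 * bad; block 1 folds to the state of its first plane, good. The table is
 * rewritten in place to {bad, good, ...} and the function returns 2.
 */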
int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);

	return nvm_get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte holds a lower and an upper half. The first half byte
	 * holds the initial value; every following half byte is an increment
	 * added to the previous entry.
	 */
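	/* Worked example (illustrative): for pairs = {0x21, 0x22} and four
	 * entries, the table decodes to lptbl = {1, 3, 5, 7}: lptbl[0] is
	 * 0x21 & 0xF = 1, then each following half byte (2, 2, 2) is added
	 * to the previous entry.
	 */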
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
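	/* Example (illustrative numbers): with 4 sectors per page, 2 planes,
	 * 512 pages per block, 1024 blocks per LUN and 8 LUNs, this gives
	 * sec_per_pl = 8, sec_per_blk = 4096, sec_per_lun = 4194304 and
	 * total_secs = 33554432.
	 */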
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	/* ret > 0 only when a valid system block was found above */
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i >= 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* skip devices without a media manager configured */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
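
/*
 * Userspace usage sketch (illustrative, error handling omitted): the control
 * node created below is driven with the NVM_* ioctls, e.g. to list the
 * registered target types:
 *
 *	struct nvm_ioctl_info info = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	ioctl(fd, NVM_INFO, &info);
 *	printf("%u target types\n", info.tgtsize);
 *	close(fd);
 */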
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);