core.c

/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

static LIST_HEAD(nvm_targets);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
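
/*
 * The three lists above hold the registered target types, media managers and
 * devices respectively; all additions, removals and lookups are serialised by
 * nvm_lock.
 */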

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

int nvm_register_target(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_targets);
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_target);

void nvm_unregister_target(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_target);
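
/*
 * Illustrative sketch (not part of the original file): a target module is
 * expected to fill in a struct nvm_tgt_type and register it from its module
 * init path, e.g.:
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *	};
 *
 *	nvm_register_target(&tt_example);	// from module_init
 *	nvm_unregister_target(&tt_example);	// from module_exit
 *
 * The member names above mirror how nvm_create_target() below uses the target
 * type (make_rq, capacity, init, exit); the authoritative struct definition
 * lives in <linux/lightnvm.h>.
 */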

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
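
/*
 * Both helpers above are thin wrappers around the device driver's DMA pool
 * ops. The "ppalist" pool they draw from is created in nvm_register() below,
 * and only when the device supports more than one physical sector per command
 * (max_phys_sect > 1).
 */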

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}
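
/*
 * Walk the registered media managers and offer the device to the one whose
 * name matches dev->sb.mmtype. register_mgr() returning a negative value is
 * treated as a hard failure, a positive value means the manager claimed the
 * device, and zero means it declined, in which case the next manager is
 * tried.
 */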
struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register the media mgr on any device that has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk_unlocked(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk_unlocked);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk_unlocked(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk_unlocked);

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
					struct ppa_addr *ppas, int nr_ppas)
{
	int i, plane_cnt, pl_idx;

	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_pages = 1;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	plane_cnt = dev->plane_mode;
	rqd->nr_pages = plane_cnt * nr_ppas;

	if (dev->ops->max_phys_sect < rqd->nr_pages)
		return -EINVAL;

	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
		for (i = 0; i < nr_ppas; i++) {
			ppas[i].g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
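
/*
 * Layout produced by nvm_set_rqd_ppalist() above, shown with hypothetical
 * values: for nr_ppas = 2 and plane_mode = NVM_PLANE_DOUBLE (plane_cnt = 2),
 * the expanded list is plane-major:
 *
 *	ppa_list[0] = ppas[0] @ plane 0
 *	ppa_list[1] = ppas[1] @ plane 0
 *	ppa_list[2] = ppas[0] @ plane 1
 *	ppa_list[3] = ppas[1] @ plane 1
 *
 * i.e. entry (pl_idx * nr_ppas) + i addresses ppas[i] on plane pl_idx.
 */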

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
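
/*
 * nvm_submit_ppa() below is a small synchronous I/O helper: it maps the
 * caller's buffer into a bio, builds a one-off request, submits it through
 * the device driver and then waits on an on-stack completion that
 * nvm_end_io_sync() signals from the end_io path. When the hung-task
 * watchdog is enabled, the wait is chunked into hang_check * (HZ/2)
 * intervals so very long I/O does not trigger it.
 */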
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct nvm_rq rqd;
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	rqd.opcode = opcode;
	rqd.bio = bio;
	rqd.wait = &wait;
	rqd.dev = dev;
	rqd.end_io = nvm_end_io_sync;
	rqd.flags = flags;
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->submit_io(dev, &rqd);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	nvm_free_rqd_ppalist(dev, &rqd);

	return rqd.error;
}
EXPORT_SYMBOL(nvm_submit_ppa);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
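
/*
 * Worked example of the decoding above, using hypothetical values
 * num_pairs = 4 and pair bytes { 0x21, 0x12 }:
 *
 *	lptbl[0] = 0x21 & 0xF                 = 1
 *	lptbl[1] = 1 + ((0x21 & 0xF0) >> 4)   = 3	(upper half of pairs[0])
 *	lptbl[2] = 3 + (0x12 & 0xF)           = 5	(lower half of pairs[1])
 *	lptbl[3] = 5 + ((0x12 & 0xF0) >> 4)   = 6	(upper half of pairs[1])
 *
 * so lptbl = { 1, 3, 5, 6 }.
 */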

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp))
			return -ENOMEM;
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp))
			return -ENOMEM;
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		return -EINVAL;
	}

	if (!dev->lps_per_blk)
		pr_info("nvm: lower page programming table missing\n");

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
	dev->total_secs = dev->nr_luns * dev->sec_per_lun;

	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->online_targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	return 0;
}
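
/*
 * Example of the geometry derived above, using hypothetical identity values:
 * 4 sectors/page, 2 planes, 256 pages/block, 1024 blocks/LUN, 8 LUNs/channel
 * and 2 channels give
 *
 *	sec_per_pl  = 4 * 2          = 8
 *	sec_per_blk = 8 * 256        = 2048
 *	sec_per_lun = 2048 * 1024    = 2097152
 *	nr_luns     = 8 * 2          = 16
 *	total_secs  = 16 * 2097152   = 33554432
 */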

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->mt)
		dev->mt->unregister_mgr(dev);

	kfree(dev->lptbl);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->ppalist_pool)
		dev->ops->destroy_dma_pool(dev->ppalist_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}

int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->ppalist_pool) {
			pr_err("nvm: could not create ppa pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
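
/*
 * Illustrative sketch (not part of the original file): a block device driver
 * that exposes an Open-Channel SSD hooks into this layer by filling in a
 * struct nvm_dev_ops and registering its request queue, e.g.:
 *
 *	static struct nvm_dev_ops example_nvm_dev_ops = {
 *		.identity		= example_nvm_identity,
 *		.submit_io		= example_nvm_submit_io,
 *		.erase_block		= example_nvm_erase_block,
 *		.create_dma_pool	= example_nvm_create_dma_pool,
 *		.destroy_dma_pool	= example_nvm_destroy_dma_pool,
 *		.dev_dma_alloc		= example_nvm_dma_alloc,
 *		.dev_dma_free		= example_nvm_dma_free,
 *		.max_phys_sect		= 64,
 *	};
 *
 *	nvm_register(q, disk_name, &example_nvm_dev_ops);
 *	...
 *	nvm_unregister(disk_name);
 *
 * Only callbacks actually used by this file are shown; the full ops table is
 * defined in <linux/lightnvm.h>.
 */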

void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev->lun_map);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static int nvm_create_target(struct nvm_dev *dev,
						struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	void *targetdata;

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	down_write(&nvm_lock);
	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		up_write(&nvm_lock);
		return -EINVAL;
	}

	list_for_each_entry(t, &dev->online_targets, list) {
		if (!strcmp(create->tgtname, t->disk->disk_name)) {
			pr_err("nvm: target name already exists.\n");
			up_write(&nvm_lock);
			return -EINVAL;
		}
	}
	up_write(&nvm_lock);

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_t;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;

	down_write(&nvm_lock);
	list_add_tail(&t->list, &dev->online_targets);
	up_write(&nvm_lock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_t:
	kfree(t);
	return -ENOMEM;
}

static void nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	lockdep_assert_held(&nvm_lock);

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return nvm_create_target(dev, create);
}

static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;
	int ret = -1;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices)
		list_for_each_entry(t, &dev->online_targets, list) {
			if (!strcmp(remove->tgtname, t->disk->disk_name)) {
				nvm_remove_target(t);
				ret = 0;
				break;
			}
		}
	up_write(&nvm_lock);

	if (ret) {
		pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	ret = sscanf(val, "%c %32s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	char opcode;
	int ret;

	ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	return __nvm_configure_remove(&remove);
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}

/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);
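
/*
 * Illustrative usage of the debug interface above (assuming the parameter is
 * exposed as /sys/module/lnvm/parameters/configure_debug; the device, target
 * and target-type names below are hypothetical):
 *
 *	# create target "tgt0" of type "rrpc" on nvme0n1, LUNs 0..3
 *	echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *
 *	# remove target "tgt0"
 *	echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 *
 *	# print LUN info for nvme0n1 through its media manager
 *	echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 */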
#endif /* CONFIG_NVM_DEBUG */

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_targets, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_remove(&remove);
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (dev->mt) {
		dev->mt->unregister_mgr(dev);
		dev->mt = NULL;
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
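
/*
 * The misc device below creates the control node (with nodename
 * "lightnvm/control" it typically appears as /dev/lightnvm/control), through
 * which user space issues the NVM_* ioctls declared in
 * <uapi/linux/lightnvm.h> and handled in nvm_ctl_ioctl() above.
 */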
static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
	int ret;

	ret = misc_register(&_nvm_misc);
	if (ret)
		pr_err("nvm: misc_register failed for control device");

	return ret;
}

static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);