/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
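
/*
 * Global registries for target types, media managers and devices. All three
 * lists, as well as the per-device online_targets lists hanging off them,
 * are protected by nvm_lock.
 */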

static LIST_HEAD(nvm_targets);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        list_for_each_entry(tt, &nvm_targets, list)
                if (!strcmp(name, tt->name))
                        return tt;

        return NULL;
}
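
/*
 * Target types register themselves here and are later instantiated against
 * a device through the ioctl or debug interfaces below. A target module
 * would typically pair the calls in its module init/exit, roughly like the
 * sketch below (the my_tgt and my_tgt_type names are illustrative
 * placeholders, not symbols from this file):
 *
 *      static struct nvm_tgt_type my_tgt_type = {
 *              .name    = "my_tgt",
 *              .version = {1, 0, 0},
 *              (plus .make_rq, .capacity, .init, .exit callbacks)
 *      };
 *
 *      static int __init my_tgt_init(void)
 *      {
 *              return nvm_register_target(&my_tgt_type);
 *      }
 *
 *      static void __exit my_tgt_exit(void)
 *      {
 *              nvm_unregister_target(&my_tgt_type);
 *      }
 */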

int nvm_register_target(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_lock);
        if (nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_targets);
        up_write(&nvm_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_target);

void nvm_unregister_target(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_lock);
        list_del(&tt->list);
        up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_target);
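
/*
 * Thin wrappers around the device driver's DMA pool. The pool itself
 * (dev->ppalist_pool) is created in nvm_register() and holds the per-request
 * physical page address (PPA) lists built in nvm_set_rqd_ppalist().
 */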

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
                        dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
        struct nvmm_type *mt;

        list_for_each_entry(mt, &nvm_mgrs, list)
                if (!strcmp(name, mt->name))
                        return mt;

        return NULL;
}

struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
        struct nvmm_type *mt;
        int ret;

        lockdep_assert_held(&nvm_lock);

        list_for_each_entry(mt, &nvm_mgrs, list) {
                ret = mt->register_mgr(dev);
                if (ret < 0) {
                        pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
                                ret, dev->name);
                        return NULL; /* initialization failed */
                } else if (ret > 0)
                        return mt;
        }

        return NULL;
}
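
/*
 * A media manager's ->register_mgr() hook returns a negative errno on
 * failure, 0 if it does not claim the device, and a positive value if it
 * takes ownership. nvm_register_mgr() additionally walks all already
 * registered devices and offers the new manager to any device that is still
 * unmanaged.
 */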

int nvm_register_mgr(struct nvmm_type *mt)
{
        struct nvm_dev *dev;
        int ret = 0;

        down_write(&nvm_lock);
        if (nvm_find_mgr_type(mt->name)) {
                ret = -EEXIST;
                goto finish;
        } else {
                list_add(&mt->list, &nvm_mgrs);
        }

        /* try to register media mgr on any device that has none configured */
        list_for_each_entry(dev, &nvm_devices, devices) {
                if (dev->mt)
                        continue;

                dev->mt = nvm_init_mgr(dev);
        }
finish:
        up_write(&nvm_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
        if (!mt)
                return;

        down_write(&nvm_lock);
        list_del(&mt->list);
        up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
                                                        unsigned long flags)
{
        return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
        return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
        return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
                        rqd->ppa_list[i] = dev_to_generic_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
        }
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
                        rqd->ppa_list[i] = generic_to_dev_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
        }
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
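
/*
 * Build the request's PPA list, expanding each caller-supplied address
 * across all planes of the device. On a dual-plane device (plane_cnt == 2)
 * with two input ppas A and B, the resulting list is A(pl=0), A(pl=1),
 * B(pl=0), B(pl=1), i.e. nr_pages == 4. The single-ppa, single-plane case
 * skips the DMA allocation and uses rqd->ppa_addr directly.
 */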

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
                        struct ppa_addr *ppas, int nr_ppas)
{
        int i, plane_cnt, pl_idx;

        if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_pages = 1;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        plane_cnt = (1 << dev->plane_mode);
        rqd->nr_pages = plane_cnt * nr_ppas;

        if (dev->ops->max_phys_sect < rqd->nr_pages)
                return -EINVAL;

        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppas[i].g.pl = pl_idx;
                        rqd->ppa_list[(i * plane_cnt) + pl_idx] = ppas[i];
                }
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret;

        if (!dev->ops->erase_block)
                return 0;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1);
        if (ret)
                return ret;

        nvm_generic_to_addr_mode(dev, &rqd);

        ret = dev->ops->erase_block(dev, &rqd);

        nvm_free_rqd_ppalist(dev, &rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
        rqd->end_io(rqd, error);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd, int errors)
{
        struct completion *waiting = rqd->wait;

        rqd->wait = NULL;

        complete(waiting);
}
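
/*
 * Derive the device geometry from the first identity group: channels, LUNs,
 * blocks, pages, planes and sector/OOB sizes, plus the totals computed from
 * them (e.g. nr_luns = luns_per_chnl * nr_chnls and total_pages =
 * total_blocks * pgs_per_blk). Only media type 0 and flash types 0/1 are
 * accepted, and the plane mode is taken from the multi-plane opcodes
 * advertised in grp->mpos.
 */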

static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_id *id = &dev->identity;
        struct nvm_id_group *grp = &id->groups[0];

        /* device values */
        dev->nr_chnls = grp->num_ch;
        dev->luns_per_chnl = grp->num_lun;
        dev->pgs_per_blk = grp->num_pg;
        dev->blks_per_lun = grp->num_blk;
        dev->nr_planes = grp->num_pln;
        dev->sec_size = grp->csecs;
        dev->oob_size = grp->sos;
        dev->sec_per_pg = grp->fpg_sz / grp->csecs;
        memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

        dev->plane_mode = NVM_PLANE_SINGLE;
        dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

        if (grp->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        if (grp->fmtype != 0 && grp->fmtype != 1) {
                pr_err("nvm: flash type not supported\n");
                return -EINVAL;
        }

        if (grp->mpos & 0x020202)
                dev->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
                dev->plane_mode = NVM_PLANE_QUAD;

        /* calculated values */
        dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
        dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
        dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
        dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

        dev->total_blocks = dev->nr_planes *
                                dev->blks_per_lun *
                                dev->luns_per_chnl *
                                dev->nr_chnls;
        dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
        INIT_LIST_HEAD(&dev->online_targets);

        return 0;
}

static void nvm_free(struct nvm_dev *dev)
{
        if (!dev)
                return;

        if (dev->mt)
                dev->mt->unregister_mgr(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        int ret = -EINVAL;

        if (!dev->q || !dev->ops)
                return ret;

        if (dev->ops->identity(dev, &dev->identity)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
                        dev->identity.ver_id, dev->identity.vmnt,
                        dev->identity.cgrps);

        if (dev->identity.ver_id != 1) {
                pr_err("nvm: device not supported by kernel.\n");
                goto err;
        }

        if (dev->identity.cgrps != 1) {
                pr_err("nvm: only one group configuration supported.\n");
                goto err;
        }

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
                        dev->name, dev->sec_per_pg, dev->nr_planes,
                        dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
                        dev->nr_chnls);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
        if (dev->ppalist_pool)
                dev->ops->destroy_dma_pool(dev->ppalist_pool);
        nvm_free(dev);

        pr_info("nvm: successfully unloaded\n");
}
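
/*
 * Entry point for low-level device drivers (e.g. the NVMe lightnvm hooks).
 * The driver hands over its request queue, disk name and an nvm_dev_ops
 * table; the core then identifies the device, builds the geometry and tries
 * to attach a media manager. A driver-side call would look roughly like the
 * sketch below (my_ops and my_ns are illustrative placeholders, not symbols
 * from this file):
 *
 *      ret = nvm_register(my_ns->queue, my_ns->disk->disk_name, &my_ops);
 *      if (ret)
 *              return ret;
 *      ...
 *      nvm_unregister(my_ns->disk->disk_name);
 *
 * The ops table must provide at least ->identity(); ->max_phys_sect is
 * capped at 256 sectors, and a PPA list DMA pool is created whenever the
 * device accepts more than one physical sector per command.
 */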

int nvm_register(struct request_queue *q, char *disk_name,
                                                struct nvm_dev_ops *ops)
{
        struct nvm_dev *dev;
        int ret;

        if (!ops->identity)
                return -EINVAL;

        dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        dev->ops = ops;
        strncpy(dev->name, disk_name, DISK_NAME_LEN);

        ret = nvm_init(dev);
        if (ret)
                goto err_init;

        if (dev->ops->max_phys_sect > 256) {
                pr_info("nvm: max sectors supported is 256.\n");
                ret = -EINVAL;
                goto err_init;
        }

        if (dev->ops->max_phys_sect > 1) {
                dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
                if (!dev->ppalist_pool) {
                        pr_err("nvm: could not create ppa pool\n");
                        ret = -ENOMEM;
                        goto err_init;
                }
        }

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        dev->mt = nvm_init_mgr(dev);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
err_init:
        kfree(dev);
        return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(char *disk_name)
{
        struct nvm_dev *dev;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(disk_name);
        if (!dev) {
                pr_err("nvm: could not find device %s to unregister\n",
                                                                disk_name);
                up_write(&nvm_lock);
                return;
        }

        list_del(&dev->devices);
        up_write(&nvm_lock);

        nvm_exit(dev);
        kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};
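
/*
 * Instantiate a target on a device: look up the requested target type,
 * reject duplicate target names, allocate a request queue and gendisk, let
 * the target initialize itself over the requested LUN range via tt->init(),
 * and finally expose the result as a block device. The new target is
 * tracked on the device's online_targets list under nvm_lock.
 */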

static int nvm_create_target(struct nvm_dev *dev,
                                        struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_simple *s = &create->conf.s;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        void *targetdata;

        if (!dev->mt) {
                pr_info("nvm: device has no media manager registered.\n");
                return -ENODEV;
        }

        down_write(&nvm_lock);
        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                up_write(&nvm_lock);
                return -EINVAL;
        }

        list_for_each_entry(t, &dev->online_targets, list) {
                if (!strcmp(create->tgtname, t->disk->disk_name)) {
                        pr_err("nvm: target name already exists.\n");
                        up_write(&nvm_lock);
                        return -EINVAL;
                }
        }
        up_write(&nvm_lock);

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
        if (!tqueue)
                goto err_t;
        blk_queue_make_request(tqueue, tt->make_rq);

        tdisk = alloc_disk(0);
        if (!tdisk)
                goto err_queue;

        sprintf(tdisk->disk_name, "%s", create->tgtname);
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
        if (IS_ERR(targetdata))
                goto err_init;

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        t->type = tt;
        t->disk = tdisk;

        down_write(&nvm_lock);
        list_add_tail(&t->list, &dev->online_targets);
        up_write(&nvm_lock);

        return 0;
err_init:
        put_disk(tdisk);
err_queue:
        blk_cleanup_queue(tqueue);
err_t:
        kfree(t);
        return -ENOMEM;
}

static void nvm_remove_target(struct nvm_target *t)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        lockdep_assert_held(&nvm_lock);

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->exit)
                tt->exit(tdisk->private_data);

        put_disk(tdisk);

        list_del(&t->list);
        kfree(t);
}

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        struct nvm_ioctl_create_simple *s;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }
        s = &create->conf.s;

        if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        s->lun_begin, s->lun_end, dev->nr_luns);
                return -EINVAL;
        }

        return nvm_create_target(dev, create);
}

static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t = NULL;
        struct nvm_dev *dev;
        int ret = -1;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices)
                list_for_each_entry(t, &dev->online_targets, list) {
                        if (!strcmp(remove->tgtname, t->disk->disk_name)) {
                                nvm_remove_target(t);
                                ret = 0;
                                break;
                        }
                }
        up_write(&nvm_lock);

        if (ret) {
                pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
                return -EINVAL;
        }

        return 0;
}
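
/*
 * When CONFIG_NVM_DEBUG is set, the same create/remove/show operations are
 * also reachable through a writable module parameter, as an alternative to
 * the ioctl interface below. Assuming the parameter ends up under
 * /sys/module/lnvm/parameters/configure_debug, usage would look roughly
 * like:
 *
 *      echo "a <device> <tgtname> <tgttype> <lun_begin>:<lun_end>" > configure_debug
 *      echo "d <tgtname>" > configure_debug
 *      echo "s <device>" > configure_debug
 *
 * matching the sscanf() formats in nvm_configure_create(),
 * nvm_configure_remove() and nvm_configure_show().
 */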

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
        struct nvm_dev *dev;
        char opcode, devname[DISK_NAME_LEN];
        int ret;

        ret = sscanf(val, "%c %31s", &opcode, devname);
        if (ret != 2) {
                pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
                return -EINVAL;
        }

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(devname);
        up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        if (!dev->mt)
                return 0;

        dev->mt->lun_info_print(dev);

        return 0;
}

static int nvm_configure_remove(const char *val)
{
        struct nvm_ioctl_remove remove;
        char opcode;
        int ret;

        ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
        if (ret != 2) {
                pr_err("nvm: invalid command. Use \"d targetname\".\n");
                return -EINVAL;
        }

        remove.flags = 0;

        return __nvm_configure_remove(&remove);
}

static int nvm_configure_create(const char *val)
{
        struct nvm_ioctl_create create;
        char opcode;
        int lun_begin, lun_end, ret;

        ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
                                                create.tgtname, create.tgttype,
                                                &lun_begin, &lun_end);
        if (ret != 6) {
                pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
                return -EINVAL;
        }

        create.flags = 0;
        create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
        create.conf.s.lun_begin = lun_begin;
        create.conf.s.lun_end = lun_end;

        return __nvm_configure_create(&create);
}

/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
static int nvm_configure_by_str_event(const char *val,
                                        const struct kernel_param *kp)
{
        char opcode;
        int ret;

        ret = sscanf(val, "%c", &opcode);
        if (ret != 1) {
                pr_err("nvm: string must have the format of \"cmd ...\"\n");
                return -EINVAL;
        }

        switch (opcode) {
        case 'a':
                return nvm_configure_create(val);
        case 'd':
                return nvm_configure_remove(val);
        case 's':
                return nvm_configure_show(val);
        default:
                pr_err("nvm: invalid command\n");
                return -EINVAL;
        }

        return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
        int sz = 0;
        char *buf_start = buf;
        struct nvm_dev *dev;

        buf += sprintf(buf, "available devices:\n");
        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                if (sz > 4095 - DISK_NAME_LEN)
                        break;
                buf += sprintf(buf, " %32s\n", dev->name);
                sz = buf - buf_start;
        }
        up_write(&nvm_lock);

        return buf - buf_start - 1;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
        .set    = nvm_configure_by_str_event,
        .get    = nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX     "lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
                                                                        0644);

#endif /* CONFIG_NVM_DEBUG */
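
/*
 * ioctl interface, reached through the "lightnvm/control" misc character
 * device registered below (typically /dev/lightnvm/control). NVM_INFO
 * reports the lightnvm version plus the registered target types,
 * NVM_GET_DEVICES lists registered devices and their media managers, and
 * NVM_DEV_CREATE/NVM_DEV_REMOVE instantiate or tear down targets. A
 * userspace caller would do roughly the following (sketch; error handling
 * omitted and the "info" variable is illustrative):
 *
 *      int fd = open("/dev/lightnvm/control", O_RDWR);
 *      struct nvm_ioctl_info info = { 0 };
 *      ioctl(fd, NVM_INFO, &info);
 *      close(fd);
 */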

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_lock);
        list_for_each_entry(tt, &nvm_targets, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                sprintf(info->devname, "%s", dev->name);
                if (dev->mt) {
                        info->bmversion[0] = dev->mt->version[0];
                        info->bmversion[1] = dev->mt->version[1];
                        info->bmversion[2] = dev->mt->version[2];
                        sprintf(info->bmname, "%s", dev->mt->name);
                } else {
                        sprintf(info->bmname, "none");
                }

                i++;
                if (i > 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}
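
/*
 * NVM_DEV_CREATE and NVM_DEV_REMOVE copy the user-supplied arguments,
 * force-terminate the embedded strings, reject unknown flags and then reuse
 * the same __nvm_configure_create()/__nvm_configure_remove() helpers as the
 * debug interface above.
 */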

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return __nvm_configure_remove(&remove);
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
        int ret;

        ret = misc_register(&_nvm_misc);
        if (ret)
                pr_err("nvm: misc_register failed for control device\n");

        return ret;
}

static void __exit nvm_mod_exit(void)
{
        misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);