/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

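/*
 * Claim the LUN range [lun_begin, lun_end] in the device's lun_map bitmap.
 * If any LUN in the range is already taken, every bit set by this call is
 * rolled back and -EBUSY is returned.
 */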
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

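/*
 * Tear down a target device mapping built by nvm_create_tgt_dev(). When
 * @clear is set, the LUN reservations held by the target are also released
 * from the parent device's lun_map.
 */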
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

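/*
 * Build the target's private view of the parent device for the LUN range
 * [lun_begin, lun_end]: a channel/LUN offset map, the list of physical LUN
 * addresses owned by the target, and a geometry restricted to that range.
 */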
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

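/*
 * Look up a registered target type by name. The __ variant expects the
 * caller to hold nvm_tgtt_lock; nvm_find_target_type() takes the lock
 * itself.
 */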
static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

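/*
 * Create a target on top of @dev: validate the ioctl configuration, reserve
 * the requested LUNs, build the target device mapping, set up the gendisk
 * and request queue, and hand over to the target type's init() callback.
 */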
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("nvm: device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue,
			(dev->geo.csecs >> 9) * NVM_MAX_VLBA);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t, true);
	mutex_unlock(&dev->mlock);

	return 0;
}

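/*
 * Allocate the device's reverse map, which translates physical channel/LUN
 * coordinates back to target-relative offsets. All offsets start out as -1
 * and are filled in when targets are created.
 */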
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

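/*
 * The helpers below translate ppa addresses between a target's logical
 * channel/LUN space and the device's physical one, using the offsets
 * recorded when the target was created.
 */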
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

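/*
 * Populate the ppa list of a request. For multi-plane (1.2) devices the
 * list is expanded so that each ppa appears once per plane.
 */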
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

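/*
 * Build the 1.2 command flags (plane mode, scrambling, suspend) for a
 * request. 2.0 devices take no flags.
 */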
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return dev->ops->submit_io_sync(dev, rqd);
}

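/*
 * Read a single sector at @ppa to probe its state. Returns 0 for valid
 * data, a positive device status (e.g. empty page) otherwise, or a
 * negative errno if the read could not be submitted.
 */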
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	if (ret)
		return ret;

	__free_page(page);

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

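/*
 * Construct 2.0-style chunk metadata for a 1.2 device by walking its bad
 * block tables and, for blocks reported as free, scanning the chunk to
 * recover its state and write pointer.
 */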
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
				geo->major_ver_id, geo->minor_ver_id,
				geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

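/*
 * Register a LightNVM device with the subsystem: create its ppa list DMA
 * pool, identify the device and initialize core structures, then add it to
 * the global device list.
 */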
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
	if (!dev->dma_pool) {
		pr_err("nvm: could not create dma pool\n");
		return -ENOMEM;
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

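/*
 * ioctl entry point for the lightnvm/control misc device. All commands
 * require CAP_SYS_ADMIN.
 */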
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);