core.c

/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
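
/*
 * nvm_find_target_type - look up a registered target type by name.
 * @lock: when non-zero, take nvm_tgtt_lock around the list walk. Callers
 *	  that already hold nvm_tgtt_lock (e.g. nvm_register_tgt_type())
 *	  pass 0 to avoid recursive locking.
 */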
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* nvm_tgt_types is protected by nvm_tgtt_lock, not nvm_lock */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}
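
/*
 * Try to attach a media manager to @dev by matching the manager type name
 * stored in the device's system block (dev->sb.mmtype). A register_mgr()
 * return value > 0 means the manager claimed the device, < 0 is a hard
 * failure, and 0 means "no match, try the next manager".
 */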
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}
	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);
/* Assumes that all valid pages have already been moved to the block
 * manager (bm) on release.
 */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
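
/*
 * Build rqd's ppa list from @ppas. When @vblk is set, each virtual-block
 * address is unfolded into one entry per plane, laid out plane-major:
 * ppa_list[pl * nr_ppas + i] holds ppas[i] on plane pl. E.g. with two
 * PPAs (p0, p1) and plane_mode == NVM_PLANE_DOUBLE the resulting list is
 * { p0/pl0, p1/pl0, p0/pl1, p1/pl1 } and rqd->nr_ppas becomes 4.
 */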
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
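
/*
 * Synchronous submission: __nvm_submit_ppa() points rqd->end_io at
 * nvm_end_io_sync() and sleeps on an on-stack completion until the
 * device calls nvm_end_io(). When the hung-task watchdog is enabled,
 * the wait is chopped into intervals of half the watchdog timeout so
 * that a legitimately long erase/program does not trip it.
 */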
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 *			 must take care to free the ppa list afterwards if
 *			 necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
		int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
/**
 * nvm_submit_ppa - submit PPAs to device. The PPAs are automatically unfolded
 *		    into single-, dual-, or quad-plane PPAs depending on the
 *		    device's plane mode.
 * @dev: device
 * @ppa: user created ppa list
 * @nr_ppas: length of ppa list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If any of the plane states is bad or grown bad, the virtual block is
 * marked bad. Otherwise the first plane's state acts as the block state.
 */
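/*
 * E.g. with plane_mode == 2, blks = { NVM_BLK_T_FREE, NVM_BLK_T_BAD,
 * NVM_BLK_T_FREE, NVM_BLK_T_FREE } folds to { NVM_BLK_T_BAD,
 * NVM_BLK_T_FREE }: block 0 inherits the bad state of its second plane,
 * block 1 stays free.
 */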
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
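
/*
 * Worked example of the pair decoding below: pairs = { 0x21, 0x12 } with
 * num_pairs == 4 yields lptbl[0] = 1 (low nibble of pairs[0]), then
 * lptbl[1] = 1 + 2 = 3 (high nibble of pairs[0]), lptbl[2] = 3 + 2 = 5
 * (low nibble of pairs[1]) and lptbl[3] = 5 + 1 = 6 (high nibble).
 */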
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte has a lower and an upper half. The first half byte
	 * holds the first increment value, and each half byte after it is
	 * an offset added to the previous value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
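
	/*
	 * grp->mpos advertises the supported plane operation modes, one
	 * byte each for read, program and erase; 0x020202 and 0x040404
	 * test the dual- and quad-plane bits across all three operation
	 * bytes (per the Open-Channel SSD 1.2 media layout).
	 */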
	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	/* prevent a double free from nvm_register()'s error path */
	dev->lun_map = NULL;
	return ret;
}
static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
}
static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}
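
/*
 * Registration flow: identify the device and set up core structures
 * (nvm_init()), create the per-device DMA pool for PPA lists, read the
 * system block if the device does bad-block management, and finally try
 * to attach a media manager before publishing the device on nvm_devices.
 */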
int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
  588. EXPORT_SYMBOL(nvm_register);
  589. void nvm_unregister(char *disk_name)
  590. {
  591. struct nvm_dev *dev;
  592. down_write(&nvm_lock);
  593. dev = nvm_find_nvm_dev(disk_name);
  594. if (!dev) {
  595. pr_err("nvm: could not find device %s to unregister\n",
  596. disk_name);
  597. up_write(&nvm_lock);
  598. return;
  599. }
  600. list_del(&dev->devices);
  601. up_write(&nvm_lock);
  602. nvm_exit(dev);
  603. kfree(dev);
  604. }
  605. EXPORT_SYMBOL(nvm_unregister);
  606. static int __nvm_configure_create(struct nvm_ioctl_create *create)
  607. {
  608. struct nvm_dev *dev;
  609. struct nvm_ioctl_create_simple *s;
  610. down_write(&nvm_lock);
  611. dev = nvm_find_nvm_dev(create->dev);
  612. up_write(&nvm_lock);
  613. if (!dev) {
  614. pr_err("nvm: device not found\n");
  615. return -EINVAL;
  616. }
  617. if (!dev->mt) {
  618. pr_info("nvm: device has no media manager registered.\n");
  619. return -ENODEV;
  620. }
  621. if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
  622. pr_err("nvm: config type not valid\n");
  623. return -EINVAL;
  624. }
  625. s = &create->conf.s;
  626. if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
  627. pr_err("nvm: lun out of bound (%u:%u > %u)\n",
  628. s->lun_begin, s->lun_end, dev->nr_luns);
  629. return -EINVAL;
  630. }
  631. return dev->mt->create_tgt(dev, create);
  632. }
#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	/* %31s: leave room for the terminating NUL in devname[32] */
	ret = sscanf(val, "%c %31s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	/* %31s: remove.tgtname is DISK_NAME_LEN (32) bytes */
	ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	/* field widths bounded by the 32/32/48-byte destination buffers */
	ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}

/* Exposes administrative interface through
 * /sys/module/lnvm/parameters/configure_debug
 */
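/*
 * Example (device and target names are illustrative):
 *   echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 * creates target tgt0 of type rrpc on LUNs 0-3 of device nvme0n1;
 * "d tgt0" removes it again and "s nvme0n1" prints the LUN info.
 */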
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);

#endif /* CONFIG_NVM_DEBUG */
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* skip devices without a media manager */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}
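
/*
 * NVM_DEV_INIT: seed a fresh system-block with the requested media
 * manager type and write it to the device (when it supports bad-block
 * management), then try to attach that manager right away. The mmtype
 * stored in dev->sb is what nvm_init_mgr() matches against on later
 * registrations.
 */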
static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
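
/*
 * The misc device below registers the control node as
 * /dev/lightnvm/control; the NVM_* ioctls handled above form its
 * user-space interface.
 */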
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
	int ret;

	ret = misc_register(&_nvm_misc);
	if (ret)
		pr_err("nvm: misc_register failed for control device\n");

	return ret;
}
static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);