core.c

/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
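/* nvm_lock serializes updates to the three registries above. */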
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);
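	/*
	 * register_mgr() follows a three-way convention: a negative value
	 * is a hard error, a positive value means the manager claimed the
	 * device, and zero means "not mine, try the next manager".
	 */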
	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk_unlocked(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk_unlocked);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk_unlocked(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk_unlocked);

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;
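		/*
		 * Unfold each block address across all planes. The list is
		 * laid out plane-major: entries [0, nr_ppas) address plane 0,
		 * [nr_ppas, 2 * nr_ppas) plane 1, and so on.
		 */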
		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppas[i].g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 * is responsible for freeing the ppa list afterwards, if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 * as single, dual or quad plane PPAs depending on device type.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If the state of any of the planes is bad or grown bad, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
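/*
 * Worked example (hypothetical states, plane_mode == 2): the per-plane
 * list { GOOD, BAD, GOOD, GOOD } folds in place to { BAD, GOOD }, since
 * a bad state on either plane marks the whole virtual block bad.
 */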
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value
	 */
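	/*
	 * Worked example (hypothetical pairs bytes 0x21, 0x10):
	 * lptbl[0] = 0x21 & 0xF = 1, lptbl[1] = 1 + 2 = 3,
	 * lptbl[2] = 3 + 0 = 3, lptbl[3] = 3 + 1 = 4.
	 */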
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
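	/*
	 * mpos advertises the supported plane operation modes; the masks
	 * below appear to test the dual- and quad-plane bits in each of
	 * the three per-operation bytes (an assumption based on the
	 * open-channel SSD 1.2 identify layout).
	 */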
	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}

int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}
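	/*
	 * At this point ret > 0 only if nvm_get_sysblock() found a valid
	 * system block; an uninitialized device is still registered below
	 * and can be set up later through the NVM_DEV_INIT ioctl.
	 */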
	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
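/*
 * Example (sketch): a block driver with request queue "q" and a populated
 * struct nvm_dev_ops would typically pair the two calls above as
 *
 *	ret = nvm_register(q, disk_name, &my_nvm_ops);
 *	...
 *	nvm_unregister(disk_name);
 *
 * where "my_nvm_ops" is a hypothetical ops table supplying at least an
 * ->identity() callback.
 */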
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}
#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	/* %31s leaves room for the terminating NUL in devname[32] */
	ret = sscanf(val, "%c %31s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}
static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	/* tgtname is DISK_NAME_LEN (32) bytes; cap the scan accordingly */
	ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* devices without a media manager cannot hold targets */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	/* field widths capped to the sizes of the nvm_ioctl_create buffers */
	ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}
/* Exposes administrative interface through /sys/module/lnvm/parameters/configure_debug */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}
static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);
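/*
 * Example (sketch, device and target names hypothetical):
 *
 *	echo "a nulldev tgt0 rrpc 0:3" > \
 *		/sys/module/lnvm/parameters/configure_debug
 *
 * creates target "tgt0" of type "rrpc" on device "nulldev" over luns 0-3;
 * "d tgt0" removes it and "s nulldev" prints the lun info.
 */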
#endif /* CONFIG_NVM_DEBUG */

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		/* the info array holds 31 entries; stop before overrunning it */
		if (i >= 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* devices without a media manager cannot hold targets */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
	int ret;

	ret = misc_register(&_nvm_misc);
	if (ret)
		pr_err("nvm: misc_register failed for control device\n");

	return ret;
}

static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);