/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}
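
/*
 * Reserve the LUN range [lun_begin, lun_end] in the device's lun_map
 * bitmap. If any LUN in the range is already taken, every bit set so
 * far is rolled back and -EBUSY is returned.
 */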
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
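
/*
 * Tear down a target device: optionally release its LUN reservations
 * (clear != 0), then free the per-channel offset arrays, the device
 * map, the LUN list and the target device itself.
 */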
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.nr_luns) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
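
/*
 * Build an nvm_tgt_dev covering LUNs [lun_begin, lun_end] of the parent
 * device. The target sees a dense, zero-based channel/LUN geometry; the
 * per-channel lun_offs arrays (and the device's reverse map) record the
 * offsets needed to translate between the two address spaces.
 */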
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.nr_luns;
	int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
	int bch = lun_begin / dev->geo.nr_luns;
	int blun = lun_begin % dev->geo.nr_luns;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.all_luns = nr_luns;
	tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->geo.op = op;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}
static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	struct nvm_geo *geo = &dev->geo;

	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF)
		e->op = NVM_TARGET_DEFAULT_OP;

	if (e->op < NVM_TARGET_MIN_OP ||
	    e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
}
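
/*
 * Create a target on top of a device: validate the ioctl configuration,
 * reserve the requested LUNs, build the target device, and expose it as
 * a block device driven by the target type's make_rq path.
 */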
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}
static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}
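
/*
 * Allocate the device's reverse map: one nvm_ch_map per physical channel,
 * with all offsets initialized to -1 (no target mapped yet). The offsets
 * are filled in when a target is created on top of the channel.
 */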
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.nr_luns;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
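
/*
 * Translate a ppa_addr between the target's dense, zero-based view and
 * the device's physical channel/LUN numbering by applying (or removing)
 * the offsets recorded in the target map and the device reverse map.
 */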
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
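
/*
 * Fill the request's PPA list from the caller's addresses. A single
 * sector on a single-plane device is stored inline in ppa_addr;
 * otherwise a DMA-able list is allocated and each address is replicated
 * across all planes (plane index encoded in ppa.g.pl).
 */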
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->plane_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->nr_chks * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->nr_chks; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
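
/*
 * Derive the device geometry from the identify data and set up the core
 * bookkeeping: the LUN allocation bitmap, the target list, locks and the
 * reverse channel/LUN map.
 */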
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->nr_luns = grp->num_lun;

	/* Generic device geometry values */
	geo->ws_min = grp->ws_min;
	geo->ws_opt = grp->ws_opt;
	geo->ws_seq = grp->ws_seq;
	geo->ws_per_chk = grp->ws_per_chk;
	geo->nr_chks = grp->num_chk;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->mccap = grp->mccap;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	geo->sec_per_chk = grp->clba;
	geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
	geo->all_luns = geo->nr_luns * geo->nr_chnls;

	/* 1.2 spec device geometry values */
	geo->plane_mode = 1 << geo->ws_seq;
	geo->nr_planes = geo->ws_opt / geo->ws_min;
	geo->sec_per_pg = geo->ws_min;
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;

	dev->total_secs = geo->all_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->ws_per_chk, geo->nr_chks,
			geo->all_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
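
/*
 * Register a device with the subsystem: create the PPA-list DMA pool
 * (when the device supports multi-sector commands), initialize the core
 * structures and add the device to the global device list.
 */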
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}
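
/*
 * ioctl handlers behind the lightnvm/control misc device. All of them
 * require CAP_SYS_ADMIN and copy their argument structures from user
 * space.
 */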
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);