core.c

/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
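
/*
 * Helpers for moving data between a request's scatter-gather list and a
 * kernel buffer. Anything short of a full-length copy is reported to the
 * host as an invalid SGL with Do Not Retry set.
 */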
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
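
/*
 * Complete all outstanding Asynchronous Event Request commands with an
 * internal error status, so the admin queue no longer has them pending
 * while the controller is being torn down.
 */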
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
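
/*
 * Drain queued asynchronous events: pair each pending AEN with an
 * outstanding AER command and complete it with the packed event result.
 * Stops as soon as either the event list or the AER command slots run out.
 */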
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
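
/*
 * Bring up a port on its configured transport. If the transport module is
 * not loaded yet, the config semaphore is dropped around a request_module()
 * call and the lookup is retried before giving up.
 */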
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
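
/*
 * Keep-alive handling: the delayed work below fires once the keep-alive
 * timeout (ctrl->kato, in seconds) expires and escalates the expiry to a
 * fatal controller error.
 */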
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
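
/*
 * Enable a namespace: open its backing block device, initialize the percpu
 * reference, insert it into the subsystem's nsid-sorted namespace list, and
 * notify all connected controllers with an asynchronous event.
 */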
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;
	uuid_gen(&ns->uuid);

	return ns;
}
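
/*
 * Fill in the completion queue entry (SQ head, SQ id, command id) for a
 * request, drop the namespace reference if one was taken, and hand the
 * response to the transport via ->queue_response().
 */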
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	if (req->sq->size)
		req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}
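
/*
 * Queue teardown is a two-step wait: confirm_done signals that the percpu
 * ref has been killed and switched to atomic mode (nvmet_confirm_sq), and
 * free_done signals that the last reference was dropped (nvmet_sq_free).
 */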
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
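
/*
 * Common entry point for every command received by a transport: initialize
 * the request, reject fused commands and unsupported SGL types, dispatch to
 * the connect/fabrics/discovery/admin/io parsers, and take a queue
 * reference. Returns false (after completing the request with an error) if
 * the command cannot be executed.
 */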
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
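
/*
 * Accessors for the individual fields of the Controller Configuration (CC)
 * property: enable, I/O command set selected, memory page size, arbitration
 * mechanism, shutdown notification, and I/O SQ/CQ entry sizes.
 */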
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
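
/*
 * Apply a host write to the CC property: an EN 0->1 transition starts the
 * controller, EN 1->0 resets it, and setting SHN performs a shutdown and
 * latches SHST_CMPLT in CSTS.
 */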
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);

	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);

	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
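
/*
 * Look up an existing controller by subsystem NQN, host NQN and controller
 * ID, taking a reference on success. On failure the connect response's
 * result field records which connect data field was at fault
 * (IPO_IATTR_CONNECT_DATA).
 */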
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
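
/*
 * Allocate and initialize a new controller for a Connect request: validate
 * the subsystem and host NQNs, allocate the per-queue arrays and a
 * controller ID, pick the keep-alive timeout, and start the keep-alive
 * timer before publishing the controller on the subsystem's list.
 */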
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
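
/*
 * Resolve a subsystem NQN for a given port: the well-known discovery NQN
 * maps to the global discovery subsystem, everything else is matched
 * against the subsystems linked to the port. A reference is taken on the
 * returned subsystem.
 */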
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
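
/*
 * Allocate a subsystem: report NVMe 1.3.0, generate a random serial number,
 * and size the queue ID space (NVMET_NR_QUEUES for NVM subsystems, admin
 * queue only for discovery subsystems).
 */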
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");