multipath.c

/*
 * Copyright (c) 2017 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0644);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	nvme_reset_ctrl(ns->ctrl);
	kblockd_schedule_work(&ns->head->requeue_work);
}
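
/*
 * Decide whether a failed request is worth retrying on another path.
 * The 0x7ff mask keeps only the status code type and status code bits,
 * dropping the DNR and MORE bits.  Errors that describe a problem with
 * the command or the medium itself are not path failures.
 */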
bool nvme_req_needs_failover(struct request *req)
{
	if (!(req->cmd_flags & REQ_NVME_MPATH))
		return false;

	switch (nvme_req(req)->status & 0x7ff) {
	/*
	 * Generic command status:
	 */
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CAP_EXCEEDED:
	case NVME_SC_RESERVATION_CONFLICT:
		return false;

	/*
	 * I/O command set specific error.  Unfortunately these values are
	 * reused for fabrics commands, but those should never get here.
	 */
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_PI:
	case NVME_SC_READ_ONLY:
	case NVME_SC_ONCS_NOT_SUPPORTED:
		WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
			nvme_fabrics_command);
		return false;

	/*
	 * Media and Data Integrity Errors:
	 */
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_COMPARE_FAILED:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_UNWRITTEN_BLOCK:
		return false;
	}

	/* Everything else could be a path failure, so should be retried */
	return true;
}
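
/*
 * Schedule the requeue work for every multipath node that has a namespace
 * on this controller, e.g. after the controller transitioned back to live.
 */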
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
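
/*
 * Slow path: scan all sibling namespaces for one whose controller is live
 * and cache it as the current path.  Runs under the head's SRCU read lock.
 */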
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state == NVME_CTRL_LIVE) {
			rcu_assign_pointer(head->current_path, ns);
			return ns;
		}
	}

	return NULL;
}
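
/*
 * Fast path lookup: reuse the cached current path when it is still live,
 * otherwise fall back to a full scan of the sibling list.
 */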
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);

	if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
		ns = __nvme_find_path(head);
	return ns;
}
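
/*
 * make_request handler for the multipath node: route the bio to a live
 * path, park it on the requeue list while all paths are down but siblings
 * still exist, or fail it outright when no path is left at all.
 */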
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
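
/*
 * Polling is only attempted on the cached current path; if that path is
 * not live the poll simply reports that nothing was found.
 */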
static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path, &head->srcu);
	if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}
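
/*
 * Drain the requeue list and resubmit each bio through the multipath node
 * so that path selection is redone in nvme_ns_head_make_request().
 */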
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}
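
/*
 * Allocate the request queue and gendisk that form the multipath node
 * sitting on top of the per-controller namespace devices.
 */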
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}
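
/*
 * Register the multipath node with the block layer and expose the shared
 * namespace identification attributes in sysfs.
 */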
void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	device_add_disk(&head->subsys->dev, head->disk);
	if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
			&nvme_ns_id_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			head->disk->disk_name);
}
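
/*
 * Link the path device into the multipath node's slaves/ directory and
 * the multipath node into the path's holders/ directory, following the
 * usual block layer holder convention.
 */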
void nvme_mpath_add_disk_links(struct nvme_ns *ns)
{
	struct kobject *slave_disk_kobj, *holder_disk_kobj;

	if (!ns->head->disk)
		return;

	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
			kobject_name(slave_disk_kobj)))
		return;

	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
			kobject_name(holder_disk_kobj)))
		sysfs_remove_link(ns->head->disk->slave_dir,
			kobject_name(slave_disk_kobj));
}
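
/*
 * Tear down the multipath node; marking the queue dying ensures the final
 * requeue work run fails any bios still parked on the requeue list.
 */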
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
			&nvme_ns_id_attr_group);
	del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}
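
/*
 * Undo the sysfs links created by nvme_mpath_add_disk_links().
 */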
void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
{
	if (!ns->head->disk)
		return;

	sysfs_remove_link(ns->disk->part0.holder_dir,
			kobject_name(&disk_to_dev(ns->head->disk)->kobj));
	sysfs_remove_link(ns->head->disk->slave_dir,
			kobject_name(&disk_to_dev(ns->disk)->kobj));
}