multipath.c

/*
 * Copyright (c) 2017 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/moduleparam.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0644);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");
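
/*
 * Failover: steal the bios off a failed multipath request, end the
 * now-empty request, reset the controller it was issued on, and kick the
 * requeue work so the bios are resubmitted on another path.
 */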
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	nvme_reset_ctrl(ns->ctrl);
	kblockd_schedule_work(&ns->head->requeue_work);
}
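
/*
 * Only requests submitted through the multipath node (REQ_NVME_MPATH) are
 * failover candidates, and only for errors that blk_path_error() classifies
 * as path errors.
 */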
bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
	if (!(req->cmd_flags & REQ_NVME_MPATH))
		return false;
	return blk_path_error(error);
}
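
/*
 * Schedule the requeue work for every multipath-enabled namespace head on
 * this controller so that parked bios get another shot at finding a path.
 */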
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
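
/*
 * Slow path: scan the sibling namespaces for one with a live controller
 * and cache it as the current path.
 */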
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state == NVME_CTRL_LIVE) {
			rcu_assign_pointer(head->current_path, ns);
			return ns;
		}
	}

	return NULL;
}
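
/*
 * Fast path: reuse the cached current path while its controller stays live,
 * otherwise fall back to __nvme_find_path().  Callers hold the head's SRCU
 * read lock.
 */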
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);

	if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
		ns = __nvme_find_path(head);
	return ns;
}
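
/*
 * make_request handler for the multipath node: route the bio to a live
 * path, park it on the requeue list while sibling namespaces still exist,
 * or fail it outright when none are left.
 */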
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
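
/*
 * Polling on the multipath node polls the queue of the cached current path,
 * provided its controller is live.
 */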
static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path, &head->srcu);
	if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}
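
/*
 * Resubmit bios that were parked while no path was available.
 */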
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}
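
/*
 * Initialize the per-head requeue machinery and, when the subsystem
 * reports multi-controller support (CMIC bit 1) and multipath is enabled,
 * allocate the request queue and gendisk for the multipath node.
 */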
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}
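
/*
 * Register the multipath gendisk and its identification sysfs attributes.
 */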
void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	device_add_disk(&head->subsys->dev, head->disk);
	if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
			&nvme_ns_id_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			head->disk->disk_name);
}
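
/*
 * Create the slaves/holders sysfs links between the multipath node and the
 * per-path namespace disk, as other stacked block drivers do.
 */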
void nvme_mpath_add_disk_links(struct nvme_ns *ns)
{
	struct kobject *slave_disk_kobj, *holder_disk_kobj;

	if (!ns->head->disk)
		return;

	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
			kobject_name(slave_disk_kobj)))
		return;

	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
			kobject_name(holder_disk_kobj)))
		sysfs_remove_link(ns->head->disk->slave_dir,
			kobject_name(slave_disk_kobj));
}
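
/*
 * Tear down the multipath node: unregister it, mark its queue dying, flush
 * any bios still on the requeue list, and drop the queue and disk.
 */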
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
			   &nvme_ns_id_attr_group);
	del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
{
	if (!ns->head->disk)
		return;

	sysfs_remove_link(ns->disk->part0.holder_dir,
			  kobject_name(&disk_to_dev(ns->head->disk)->kobj));
	sysfs_remove_link(ns->head->disk->slave_dir,
			  kobject_name(&disk_to_dev(ns->disk)->kobj));
}