
Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that should go into this release. This contains:

   - An NVMe pull request from Christoph, with a few critical fixes for
     NVMe.

   - A block drain queue fix from Ming.

   - The concurrent lo_open/release fix for loop"

* 'for-linus' of git://git.kernel.dk/linux-block:
  loop: fix concurrent lo_open/lo_release
  block: drain queue before waiting for q_usage_counter becoming zero
  nvme-fcloop: avoid possible uninitialized variable warning
  nvme-mpath: fix last path removal during traffic
  nvme-rdma: fix concurrent reset and reconnect
  nvme: fix sector units when going between formats
  nvme-pci: move use_sgl initialization to nvme_init_iod()
Linus Torvalds, 7 years ago
parent commit d476c5334f

+ 7 - 2
block/blk-core.c

@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }

+void blk_drain_queue(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 	spin_lock_irq(lock);
-	if (!q->mq_ops)
-		__blk_drain_queue(q, true);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);


+ 2 - 0
block/blk-mq.c

@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
 	 * exported to drivers as the only user for unfreeze is blk_mq.
 	 */
 	blk_freeze_queue_start(q);
+	if (!q->mq_ops)
+		blk_drain_queue(q);
 	blk_mq_freeze_queue_wait(q);
 }


+ 2 - 0
block/blk.h

@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */

+extern void blk_drain_queue(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */

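Taken together, the three block-layer hunks above move the legacy-queue drain out of blk_cleanup_queue() and into blk_freeze_queue(), between starting the freeze and waiting on q_usage_counter: requests already queued on a legacy (!mq_ops) queue hold usage-counter references, so without draining first the wait for the counter to reach zero could stall. A condensed sketch of the resulting ordering (illustrative only, abbreviated from the hunks):

void blk_freeze_queue(struct request_queue *q)
{
	blk_freeze_queue_start(q);	/* stop new requests from entering */
	if (!q->mq_ops)			/* legacy request_fn queues only */
		blk_drain_queue(q);	/* complete queued requests that
					 * still hold q_usage_counter refs */
	blk_mq_freeze_queue_wait(q);	/* counter can now reach zero */
}

The blk.h hunk adds the extern declaration so blk-mq.c can reach the helper, which stays internal to the block layer.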
+ 8 - 2
drivers/block/loop.c

@@ -1581,9 +1581,8 @@ out:
 	return err;
 }

-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-	struct loop_device *lo = disk->private_data;
 	int err;

 	if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 	mutex_unlock(&lo->lo_ctl_mutex);
 }

+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+	mutex_lock(&loop_index_mutex);
+	__lo_release(disk->private_data);
+	mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
 	.owner =	THIS_MODULE,
 	.open =		lo_open,

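The loop change splits the old release path into an internal __lo_release() and wraps the lo_release() entry point in loop_index_mutex, the same mutex the driver's lo_open() path runs under, so a concurrent open can no longer observe the device mid-teardown. Side by side after the fix (sketch; lo_open() abbreviated to the locking that matters here):

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&loop_index_mutex);
	/* ... look up the loop device and take a reference ... */
	mutex_unlock(&loop_index_mutex);
	return 0;	/* error handling elided */
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	mutex_lock(&loop_index_mutex);	/* serialize against lo_open() */
	__lo_release(disk->private_data);
	mutex_unlock(&loop_index_mutex);
}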
+ 6 - 1
drivers/nvme/host/core.c

@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+	unsigned short bs = 1 << ns->lba_shift;
 	unsigned stream_alignment = 0;

 	if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);

-	blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+	blk_queue_logical_block_size(disk->queue, bs);
+	blk_queue_physical_block_size(disk->queue, bs);
+	blk_queue_io_min(disk->queue, bs);
+
 	if (ns->ms && !ns->ext &&
 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2987,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	mutex_unlock(&ns->ctrl->namespaces_mutex);

 	synchronize_srcu(&ns->head->srcu);
+	nvme_mpath_check_last_path(ns);
 	nvme_put_ns(ns);
 }


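Two separate fixes land in core.c. The first keeps all queue limits in step with the namespace's current format: capacity is already converted from logical blocks to 512-byte sectors via nsze << (lba_shift - 9), and the hunk now derives the physical block size and minimum I/O size from the same lba_shift, so reformatting a namespace to a different LBA size updates every exported unit consistently. The arithmetic in isolation (standalone C, made-up values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nsze = 1000000;	/* namespace size in logical blocks */
	unsigned lba_shift = 12;	/* 4096-byte logical blocks */

	unsigned bs = 1u << lba_shift;			/* logical block size */
	uint64_t sectors = nsze << (lba_shift - 9);	/* 512-byte sectors */

	printf("%u-byte blocks -> %llu sectors (%llu bytes)\n", bs,
	       (unsigned long long)sectors,
	       (unsigned long long)sectors * 512);
	return 0;
}

The second fix, the nvme_mpath_check_last_path() call in nvme_ns_remove(), is discussed with the nvme.h hunk below.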
+ 12 - 0
drivers/nvme/host/nvme.h

@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 		rcu_assign_pointer(head->current_path, NULL);
 }
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+	struct nvme_ns_head *head = ns->head;
+
+	if (head->disk && list_empty(&head->list))
+		kblockd_schedule_work(&head->requeue_work);
+}
+
 #else
 static inline void nvme_failover_req(struct request *req)
 {
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */

 #ifdef CONFIG_NVM

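nvme_mpath_check_last_path() closes the window in the multipath fix: once nvme_ns_remove() has unlinked the namespace and synchronize_srcu() guarantees no reader still holds the old path, the helper kicks requeue_work if that was the last path on the head, so bios parked on the multipath device's requeue list are processed instead of waiting forever. The empty stub keeps the unconditional call site in core.c compiling when CONFIG_NVME_MULTIPATH is off. The resulting removal order, abbreviated to the core.c hunk above:

/* nvme_ns_remove(), tail end: */
mutex_unlock(&ns->ctrl->namespaces_mutex);

synchronize_srcu(&ns->head->srcu);	/* wait out path readers */
nvme_mpath_check_last_path(ns);		/* last path gone: kick
					 * head->requeue_work */
nvme_put_ns(ns);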
+ 20 - 22
drivers/nvme/host/pci.c

@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req)
 	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 }

+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	unsigned int avg_seg_size;
+
+	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+			blk_rq_nr_phys_segments(req));
+
+	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+		return false;
+	if (!iod->nvmeq->qid)
+		return false;
+	if (!sgl_threshold || avg_seg_size < sgl_threshold)
+		return false;
+	return true;
+}
+
 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
 	unsigned int size = blk_rq_payload_bytes(rq);

+	iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
 				iod->use_sgl);
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	dma_addr_t prp_dma;
 	int nprps, i;

-	iod->use_sgl = false;
-
 	length -= (page_size - offset);
 	if (length <= 0) {
 		iod->first_dma = 0;
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	int entries = iod->nents, i = 0;
 	dma_addr_t sgl_dma;

-	iod->use_sgl = true;
-
 	/* setting the transfer type as SGL */
 	cmd->flags = NVME_CMD_SGL_METABUF;

@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	return BLK_STS_OK;
 }

-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int avg_seg_size;
-
-	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-			blk_rq_nr_phys_segments(req));
-
-	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
-		return false;
-	if (!iod->nvmeq->qid)
-		return false;
-	if (!sgl_threshold || avg_seg_size < sgl_threshold)
-		return false;
-	return true;
-}
-
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 				DMA_ATTR_NO_WARN))
 		goto out;

-	if (nvme_pci_use_sgls(dev, req))
+	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

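The pci.c change is a code-motion fix: nvme_pci_use_sgls() moves above nvme_init_iod(), which now caches its verdict in iod->use_sgl before nvme_pci_iod_alloc_size() uses that flag to size the per-request allocation. nvme_map_data() then branches on the cached flag, and the redundant hard-coded assignments in nvme_pci_setup_prps()/nvme_pci_setup_sgls() are dropped, so the allocation and the mapping path can no longer disagree. The heuristic itself is unchanged: SGLs are used only when the controller advertises support (SGLS bits 0 or 1), the queue is an I/O queue (qid != 0), and the request's average segment size reaches sgl_threshold. The average-segment check in isolation (standalone C, made-up request values; the threshold shown is illustrative, not the module default):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	unsigned int payload_bytes = 131072;	/* blk_rq_payload_bytes() */
	unsigned int nr_segments = 32;		/* blk_rq_nr_phys_segments() */
	unsigned int sgl_threshold = 32768;	/* illustrative value */

	unsigned int avg_seg_size = DIV_ROUND_UP(payload_bytes, nr_segments);

	printf("avg_seg_size=%u -> use %s\n", avg_seg_size,
	       sgl_threshold && avg_seg_size >= sgl_threshold ? "SGL" : "PRP");
	return 0;
}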
+ 13 - 1
drivers/nvme/host/rdma.c

@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_start_queues(&ctrl->ctrl);

+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	nvme_rdma_reconnect_or_remove(ctrl);
 }

 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 {
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
 		return;

 	queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);

+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	ret = nvme_rdma_configure_admin_queue(ctrl, false);
 	if (ret)
 		goto out_fail;

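The rdma fix serializes reset and error recovery through the controller state machine. nvme_rdma_error_recovery() now claims the controller by moving it to RESETTING, and bails out if a reset already owns that state; the transition to RECONNECTING happens later, inside the work items themselves, only after teardown has finished. Roughly (sketch; the reset path enters RESETTING in core code before reset_work runs):

/*
 *   LIVE --(I/O error)------> RESETTING   via nvme_rdma_error_recovery()
 *   LIVE --(reset request)--> RESETTING   via nvme core
 *
 *   Only one caller wins the LIVE -> RESETTING transition, so the two
 *   paths can no longer tear the controller down concurrently.
 *
 *   RESETTING --(after teardown, in err_work / reset_work)--> RECONNECTING
 *   RECONNECTING --> nvme_rdma_reconnect_or_remove()
 */

The WARN_ON_ONCE(1) on a failed RESETTING -> RECONNECTING transition documents that, given the ordering above, that change should never be contested.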
+ 1 - 1
drivers/nvme/target/fcloop.c

@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct fcloop_nport *nport = NULL, *tmpport;
-	struct fcloop_tport *tport;
+	struct fcloop_tport *tport = NULL;
 	u64 nodename, portname;
 	unsigned long flags;
 	int ret;
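
The fcloop change is a one-line warning fix: tport is assigned only on the path where a matching nport is found, so the compiler cannot prove it is initialized before later use, and starting it at NULL makes the not-found case well defined. The warning class in miniature (hypothetical standalone example, not driver code):

#include <stdio.h>

struct tport { int id; };

static struct tport registry[2] = { { 1 }, { 2 } };

static struct tport *find_tport(int id)
{
	struct tport *tport = NULL;	/* without "= NULL", the not-found
					 * path returns an indeterminate
					 * pointer and gcc may warn it is
					 * used uninitialized */
	int i;

	for (i = 0; i < 2; i++)
		if (registry[i].id == id)
			tport = &registry[i];
	return tport;
}

int main(void)
{
	printf("%s\n", find_tport(3) ? "found" : "not found");
	return 0;
}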