
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Just three small last minute regressions that were found in the last
  week. The Broadcom fix is a bit big for rc7, but since it is fixing
  driver crash regressions that were merged via netdev into rc1, I am
  sending it.

   - bnxt netdev changes merged this cycle caused the bnxt RDMA driver
     to crash under certain situations

   - Arnd found (several, unfortunately) kconfig problems with the
     patches adding INFINIBAND_ADDR_TRANS. Revert this last part; it
     will be fixed more fully outside -rc.

   - Subtle change in error code for a uapi function caused breakage in
     userspace. This bug was subtly introduced this cycle"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/core: Fix error code for invalid GID entry
  IB: Revert "remove redundant INFINIBAND kconfig dependencies"
  RDMA/bnxt_re: Fix broken RoCE driver due to recent L2 driver changes
Linus Torvalds, 7 years ago
commit 7fdf3e8616

+ 1 - 1
drivers/infiniband/core/cache.c

@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 		return -EINVAL;
 
 	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
-		return -EAGAIN;
+		return -EINVAL;
 
 	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
 	if (attr) {
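For context on the uapi errno fix above: GID lookups reach __ib_cache_gid_get() through the kernel's GID query helpers, and uverbs passes the resulting errno through to userspace. The helper below is a hypothetical caller, not part of the patch, written against the ib_query_gid() signature of that era; it only illustrates the contract the one-line change restores.

#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only (hypothetical helper): with the fix, an entry
 * that is present but marked GID_TABLE_ENTRY_INVALID reports -EINVAL, the
 * same as an out-of-range index, instead of the transient-looking -EAGAIN
 * that had slipped in this cycle and confused userspace.
 */
static bool gid_index_usable(struct ib_device *dev, u8 port, int index)
{
	union ib_gid gid;
	int rc = ib_query_gid(dev, port, index, &gid, NULL);

	if (rc == -EINVAL)	/* bad index or invalidated entry */
		return false;

	return rc == 0;
}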

+ 54 - 1
drivers/infiniband/hw/bnxt_re/main.c

@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
 	bnxt_re_ib_unreg(rdev, false);
 }
 
+static void bnxt_re_stop_irq(void *handle)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx;
+
+	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		bnxt_qplib_nq_stop_irq(nq, false);
+	}
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx, rc;
+
+	if (!ent) {
+		/* Not setting the f/w timeout bit in rcfw.
+		 * During the driver unload the first command
+		 * to f/w will timeout and that will set the
+		 * timeout bit.
+		 */
+		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+		return;
+	}
+
+	/* Vectors may change after restart, so update with new vectors
+	 * in device structure.
+	 */
+	for (indx = 0; indx < rdev->num_msix; indx++)
+		rdev->msix_entries[indx].vector = ent[indx].vector;
+
+	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+				  false);
+	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+					     msix_ent[indx].vector, false);
+		if (rc)
+			dev_warn(rdev_to_dev(rdev),
+				 "Failed to reinit NQ index %d\n", indx - 1);
+	}
+}
+
 static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 	.ulp_async_notifier = NULL,
 	.ulp_stop = bnxt_re_stop,
 	.ulp_start = bnxt_re_start,
 	.ulp_sriov_config = bnxt_re_sriov_config,
-	.ulp_shutdown = bnxt_re_shutdown
+	.ulp_shutdown = bnxt_re_shutdown,
+	.ulp_irq_stop = bnxt_re_stop_irq,
+	.ulp_irq_restart = bnxt_re_start_irq
 };
 
 /* RoCE -> Net driver */
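The two new hooks give the bnxt_en L2 driver a way to quiesce and re-attach bnxt_re's interrupts around an MSI-X re-distribution, which is the netdev-side behaviour that had been crashing the RDMA driver. A rough sketch of the calling contract as seen from the ULP side; the wrapper function and its name are hypothetical, only the hook bodies above come from the patch.

/*
 * Hypothetical caller sketch: the L2 driver invokes .ulp_irq_stop before it
 * re-balances MSI-X vectors and .ulp_irq_restart afterwards with the new
 * vector table, which may differ from the one handed out at probe time.
 */
static void example_msix_rebalance(void *ulp_handle,
				   struct bnxt_msix_entry *new_ent)
{
	bnxt_re_stop_irq(ulp_handle);			/* .ulp_irq_stop */

	/* ... the L2 driver reshuffles its MSI-X allocation here ... */

	bnxt_re_start_irq(ulp_handle, new_ent);		/* .ulp_irq_restart */
}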

+ 60 - 34
drivers/infiniband/hw/bnxt_re/qplib_fp.c

@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+	tasklet_disable(&nq->worker);
+	/* Mask h/w interrupt */
+	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+	/* Sync with last running IRQ handler */
+	synchronize_irq(nq->vector);
+	if (kill)
+		tasklet_kill(&nq->worker);
+	if (nq->requested) {
+		irq_set_affinity_hint(nq->vector, NULL);
+		free_irq(nq->vector, nq);
+		nq->requested = false;
+	}
+}
+
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 {
 	if (nq->cqn_wq) {
 		destroy_workqueue(nq->cqn_wq);
 		nq->cqn_wq = NULL;
 	}
+
 	/* Make sure the HW is stopped! */
-	synchronize_irq(nq->vector);
-	tasklet_disable(&nq->worker);
-	tasklet_kill(&nq->worker);
+	bnxt_qplib_nq_stop_irq(nq, true);
 
-	if (nq->requested) {
-		irq_set_affinity_hint(nq->vector, NULL);
-		free_irq(nq->vector, nq);
-		nq->requested = false;
-	}
 	if (nq->bar_reg_iomem)
 		iounmap(nq->bar_reg_iomem);
 	nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 	nq->vector = 0;
 }
 
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init)
+{
+	int rc;
+
+	if (nq->requested)
+		return -EFAULT;
+
+	nq->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+			     (unsigned long)nq);
+	else
+		tasklet_enable(&nq->worker);
+
+	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+	if (rc)
+		return rc;
+
+	cpumask_clear(&nq->mask);
+	cpumask_set_cpu(nq_indx, &nq->mask);
+	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+	if (rc) {
+		dev_warn(&nq->pdev->dev,
+			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+			 nq->vector, nq_indx);
+	}
+	nq->requested = true;
+	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	return rc;
+}
+
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 	resource_size_t nq_base;
 	int rc = -1;
 
-	nq->pdev = pdev;
-	nq->vector = msix_vector;
 	if (cqn_handler)
 		nq->cqn_handler = cqn_handler;
 
 	if (srqn_handler)
 		nq->srqn_handler = srqn_handler;
 
-	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
 	/* Have a task to schedule CQ notifiers in post send case */
 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
 	if (!nq->cqn_wq)
-		goto fail;
-
-	nq->requested = false;
-	memset(nq->name, 0, 32);
-	sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
-	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
-	if (rc) {
-		dev_err(&nq->pdev->dev,
-			"Failed to request IRQ for NQ: %#x", rc);
-		goto fail;
-	}
-
-	cpumask_clear(&nq->mask);
-	cpumask_set_cpu(nq_idx, &nq->mask);
-	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
-	if (rc) {
-		dev_warn(&nq->pdev->dev,
-			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
-			 nq->vector, nq_idx);
-	}
+		return -ENOMEM;
 
-	nq->requested = true;
 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
 	nq->bar_reg_off = bar_reg_offset;
 	nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 		rc = -ENOMEM;
 		goto fail;
 	}
-	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+	if (rc) {
+		dev_err(&nq->pdev->dev,
+			"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+		goto fail;
+	}
 
 	return 0;
 fail:
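The NQ IRQ handling is split out of enable/disable so the same quiesce and re-arm sequence can serve both a transient restart and full teardown. A small illustrative pairing of the new helpers, not additional code from the patch, showing how the kill and need_init flags are meant to line up:

/*
 * Illustrative only: a transient restart (ulp_irq_stop -> ulp_irq_restart)
 * keeps the tasklet alive so it can simply be tasklet_enable()d again,
 * while final teardown in bnxt_qplib_disable_nq() passes kill = true and
 * lets tasklet_kill() run.
 */
static int example_nq_irq_rebind(struct bnxt_qplib_nq *nq, int nq_idx,
				 int new_vector)
{
	bnxt_qplib_nq_stop_irq(nq, false);	/* quiesce, keep the tasklet */
	return bnxt_qplib_nq_start_irq(nq, nq_idx, new_vector, false);
}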

+ 3 - 0
drivers/infiniband/hw/bnxt_re/qplib_fp.h

@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
 	struct bnxt_qplib_cq    *cq;
 };
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,

+ 43 - 18
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c

@@ -582,19 +582,29 @@ fail:
 	return -ENOMEM;
 }
 
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
 {
-	unsigned long indx;
-
-	/* Make sure the HW channel is stopped! */
-	synchronize_irq(rcfw->vector);
 	tasklet_disable(&rcfw->worker);
-	tasklet_kill(&rcfw->worker);
+	/* Mask h/w interrupts */
+	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		rcfw->creq.max_elements);
+	/* Sync with last running IRQ-handler */
+	synchronize_irq(rcfw->vector);
+	if (kill)
+		tasklet_kill(&rcfw->worker);
 
 	if (rcfw->requested) {
 		free_irq(rcfw->vector, rcfw);
 		rcfw->requested = false;
 	}
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+	unsigned long indx;
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
 	if (rcfw->cmdq_bar_reg_iomem)
 		iounmap(rcfw->cmdq_bar_reg_iomem);
 	rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 	rcfw->vector = 0;
 }
 
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init)
+{
+	int rc;
+
+	if (rcfw->requested)
+		return -EFAULT;
+
+	rcfw->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&rcfw->worker,
+			     bnxt_qplib_service_creq, (unsigned long)rcfw);
+	else
+		tasklet_enable(&rcfw->worker);
+	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+			 "bnxt_qplib_creq", rcfw);
+	if (rc)
+		return rc;
+	rcfw->requested = true;
+	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		      rcfw->creq.max_elements);
+
+	return 0;
+}
+
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 	rcfw->creq_qp_event_processed = 0;
 	rcfw->creq_func_event_processed = 0;
 
-	rcfw->vector = msix_vector;
 	if (aeq_handler)
 		rcfw->aeq_handler = aeq_handler;
+	init_waitqueue_head(&rcfw->waitq);
 
-	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
-		     (unsigned long)rcfw);
-
-	rcfw->requested = false;
-	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
-			 "bnxt_qplib_creq", rcfw);
+	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
 	if (rc) {
 		dev_err(&rcfw->pdev->dev,
 			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
 		bnxt_qplib_disable_rcfw_channel(rcfw);
 		return rc;
 	}
-	rcfw->requested = true;
-
-	init_waitqueue_head(&rcfw->waitq);
-
-	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
 
 	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
 	init.cmdq_size_cmdq_lvl = cpu_to_le16(

+ 3 - 0
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h

@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
 				  struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init);
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,

+ 1 - 1
drivers/infiniband/ulp/srpt/Kconfig

@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 
 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The

+ 1 - 1
drivers/nvme/host/Kconfig

@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
-	depends on INFINIBAND_ADDR_TRANS && BLOCK
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
 	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL

+ 1 - 1
drivers/nvme/target/Kconfig

@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
-	depends on INFINIBAND_ADDR_TRANS
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
 	depends on NVME_TARGET
 	select SGL_ALLOC
 	help

+ 1 - 1
drivers/staging/lustre/lnet/Kconfig

@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an

+ 1 - 1
fs/cifs/Kconfig

@@ -197,7 +197,7 @@ config CIFS_SMB311
 
 config CIFS_SMB_DIRECT
 	bool "SMB Direct support (Experimental)"
-	depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
+	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
 	help
 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,

+ 1 - 1
net/9p/Kconfig

@@ -32,7 +32,7 @@ config NET_9P_XEN
 
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND_ADDR_TRANS
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.

+ 1 - 1
net/rds/Kconfig

@@ -8,7 +8,7 @@ config RDS
 
 config RDS_RDMA
 	tristate "RDS over Infiniband"
-	depends on RDS && INFINIBAND_ADDR_TRANS
+	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband as a transport.
 	  This transport supports RDMA operations.

+ 1 - 1
net/sunrpc/Kconfig

@@ -50,7 +50,7 @@ config SUNRPC_DEBUG
 
 config SUNRPC_XPRT_RDMA
 	tristate "RPC-over-RDMA transport"
-	depends on SUNRPC && INFINIBAND_ADDR_TRANS
+	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	select SG_POOL
 	help