@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 		return -EINVAL;
 	}
+
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
 	mutex_lock(&cnic_lock);
 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	}
 	mutex_unlock(&cnic_lock);
 
-	if (ulp_type == CNIC_ULP_ISCSI)
-		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
-	else if (ulp_type == CNIC_ULP_FCOE)
+	if (ulp_type == CNIC_ULP_FCOE)
 		dev->fcoe_cap = NULL;
 
 	synchronize_rcu();
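
The first two hunks are one logical change: the ISCSI_KEVENT_IF_DOWN netlink
message moves from after the ULP ops pointer has been cleared (with cnic_lock
already dropped) to before cnic_lock is taken, so the interface-down event is
sent while the iSCSI ULP is still registered. Presumably this closes a window
where the notification could race with the unregistration; the diff itself does
not state the motivation. Net effect on cnic_unregister_device(), reconstructed
from the two hunks above (the elided middle is unchanged):

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	/* ... clear cp->ulp_ops[ulp_type] under the lock ... */
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();
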
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_uio_dev *udev;
 
-	read_lock(&cnic_dev_lock);
 	list_for_each_entry(udev, &cnic_udev_list, list) {
 		if (udev->pdev == dev->pcidev) {
 			udev->dev = dev;
 			if (__cnic_alloc_uio_rings(udev, pages)) {
 				udev->dev = NULL;
-				read_unlock(&cnic_dev_lock);
 				return -ENOMEM;
 			}
 			cp->udev = udev;
-			read_unlock(&cnic_dev_lock);
 			return 0;
 		}
 	}
-	read_unlock(&cnic_dev_lock);
 
 	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
 	if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	if (__cnic_alloc_uio_rings(udev, pages))
 		goto err_udev;
 
-	write_lock(&cnic_dev_lock);
 	list_add(&udev->list, &cnic_udev_list);
-	write_unlock(&cnic_dev_lock);
 
 	pci_dev_get(udev->pdev);
 
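
These two hunks drop cnic_dev_lock, a rwlock (i.e. a spinning lock), around
both the cnic_udev_list walk and the list_add() insertion. If
__cnic_alloc_uio_rings() can sleep while allocating the UIO rings, holding a
spinlock across it would be invalid, which would explain removing the lock; the
removal is then only safe if callers of cnic_alloc_uio_rings() are already
serialized by some other means (presumably the RTNL lock on the probe/start
path, though the diff does not show that context). Note that the
kzalloc(..., GFP_ATOMIC) is left untouched, even though with no spinlock held a
sleeping GFP_KERNEL allocation would presumably now be permissible.
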
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
 {
 	int if_type;
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 		void *ctx;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->indicate_netevent)
+		mutex_lock(&cnic_lock);
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
+		if (!ulp_ops || !ulp_ops->indicate_netevent) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
 
 		ctx = cp->ulp_handle[if_type];
 
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
+
 		ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 /* netdev event handler */
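
The last hunk replaces the rcu_read_lock()/rcu_read_unlock() section with a
short cnic_lock critical section: the ulp_ops pointer is snapshotted under the
mutex (rcu_dereference_protected() with lockdep_is_held() documents that the
mutex, not an RCU read-side section, now protects the access),
ULP_F_CALL_PENDING is set, the mutex is dropped, and only then is
indicate_netevent() invoked. Because the callback runs with no lock held it is
free to sleep, and an unregister path can presumably wait for
ULP_F_CALL_PENDING to clear before tearing the ULP down. Below is a minimal
userspace sketch of this pattern, not kernel code: a pthread mutex stands in
for cnic_lock, a plain int for the atomic ULP_F_CALL_PENDING bit, and all
names (ulp_slot, deliver_event, my_handler) are illustrative, not from cnic.

/* build: cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>

struct ulp_slot {
	void (*handler)(void *ctx, unsigned long event); /* NULL = unregistered */
	void *ctx;
	int call_pending;	/* set while a callback is in flight;
				 * the kernel uses atomic set_bit/clear_bit */
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the patched loop body: snapshot under the lock, call outside it. */
static void deliver_event(struct ulp_slot *slot, unsigned long event)
{
	void (*handler)(void *, unsigned long);
	void *ctx;

	pthread_mutex_lock(&slot_lock);
	handler = slot->handler;	/* snapshot the callback pointer */
	if (!handler) {
		pthread_mutex_unlock(&slot_lock);
		return;			/* like the "continue" in the hunk */
	}
	ctx = slot->ctx;
	slot->call_pending = 1;		/* like set_bit(ULP_F_CALL_PENDING, ...) */
	pthread_mutex_unlock(&slot_lock);

	handler(ctx, event);		/* may block; no lock held */

	slot->call_pending = 0;		/* like clear_bit(ULP_F_CALL_PENDING, ...) */
}

static void my_handler(void *ctx, unsigned long event)
{
	printf("netevent %lu for %s\n", event, (const char *)ctx);
}

int main(void)
{
	struct ulp_slot slot = { my_handler, "eth0", 0 };

	deliver_event(&slot, 1UL);
	return 0;
}

The key property of the pattern: the callback pointer and its context are read
under the lock, but the call itself happens unlocked, with call_pending telling
the unregister side that a call is still in flight.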