@@ -1939,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rwi *rwi;
+	unsigned long flags;
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	if (!list_empty(&adapter->rwi_list)) {
 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 		rwi = NULL;
 	}
 
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	return rwi;
 }
 
@@ -2025,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	struct list_head *entry, *tmp_entry;
 	struct ibmvnic_rwi *rwi, *tmp;
 	struct net_device *netdev = adapter->netdev;
+	unsigned long flags;
 	int ret;
 
 	if (adapter->state == VNIC_REMOVING ||
@@ -2041,13 +2043,13 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 		goto err;
 	}
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	list_for_each(entry, &adapter->rwi_list) {
 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
 		if (tmp->reset_reason == reason) {
 			netdev_dbg(netdev, "Skipping matching reset\n");
-			mutex_unlock(&adapter->rwi_lock);
+			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 			ret = EBUSY;
 			goto err;
 		}
@@ -2055,7 +2057,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 
 	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
 	if (!rwi) {
-		mutex_unlock(&adapter->rwi_lock);
+		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 		ibmvnic_close(netdev);
 		ret = ENOMEM;
 		goto err;
@@ -2069,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	}
 	rwi->reset_reason = reason;
 	list_add_tail(&rwi->list, &adapter->rwi_list);
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	adapter->resetting = true;
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
@@ -4700,7 +4702,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->rwi_lock);
+	spin_lock_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
 	adapter->mac_change_pending = false;
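
For reference, the locking pattern the hunks above apply is sketched below, outside the patch itself: a sleeping mutex guarding a work-item list is replaced with a spinlock taken via spin_lock_irqsave()/spin_unlock_irqrestore(), so the critical section no longer sleeps and can be entered from atomic context. The structure and function names in the sketch (example_ctx, example_add) are illustrative only and do not come from the ibmvnic driver.

/* Minimal sketch of the mutex-to-spinlock conversion, assuming a
 * hypothetical context that serializes access to a list.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_ctx {
	spinlock_t lock;		/* was: struct mutex lock; */
	struct list_head items;
};

static void example_init(struct example_ctx *ctx)
{
	INIT_LIST_HEAD(&ctx->items);
	spin_lock_init(&ctx->lock);	/* was: mutex_init(&ctx->lock); */
}

static void example_add(struct example_ctx *ctx, struct list_head *item)
{
	unsigned long flags;

	/* was: mutex_lock(&ctx->lock); */
	spin_lock_irqsave(&ctx->lock, flags);
	list_add_tail(item, &ctx->items);
	spin_unlock_irqrestore(&ctx->lock, flags);
	/* was: mutex_unlock(&ctx->lock); */
}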