@@ -194,7 +194,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 	if (!ltb->buff)
 		return;
 
-	if (!adapter->failover)
+	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+	    adapter->reset_reason != VNIC_RESET_MOBILITY)
 		send_request_unmap(adapter, ltb->map_id);
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 }
@@ -292,9 +293,6 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
 {
 	int i;
 
-	if (adapter->migrated)
-		return;
-
 	adapter->replenish_task_cycles++;
 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 	     i++) {
@@ -350,7 +348,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 
 		if (!rx_pool->rx_buff)
-		continue;
+			continue;
 
 		for (j = 0; j < rx_pool->size; j++) {
 			if (rx_pool->rx_buff[j].skb) {
@@ -554,11 +552,20 @@ static int ibmvnic_login(struct net_device *netdev)
 
 static void release_resources(struct ibmvnic_adapter *adapter)
 {
+	int i;
+
 	release_tx_pools(adapter);
 	release_rx_pools(adapter);
 
 	release_stats_token(adapter);
 	release_error_buffers(adapter);
+
+	if (adapter->napi) {
+		for (i = 0; i < adapter->req_rx_queues; i++) {
+			if (&adapter->napi[i])
+				netif_napi_del(&adapter->napi[i]);
+		}
+	}
 }
 
 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -569,11 +576,6 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
 	bool resend;
 	int rc;
 
-	if (adapter->logical_link_state == link_state) {
-		netdev_dbg(netdev, "Link state already %d\n", link_state);
-		return 0;
-	}
-
 	netdev_err(netdev, "setting link state %d\n", link_state);
 	memset(&crq, 0, sizeof(crq));
 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
@@ -624,22 +626,10 @@ static int set_real_num_queues(struct net_device *netdev)
 	return rc;
 }
 
-static int ibmvnic_open(struct net_device *netdev)
+static int init_resources(struct ibmvnic_adapter *adapter)
 {
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	struct device *dev = &adapter->vdev->dev;
-	int rc = 0;
-	int i;
-
-	if (adapter->is_closed) {
-		rc = ibmvnic_init(adapter);
-		if (rc)
-			return rc;
-	}
-
-	rc = ibmvnic_login(netdev);
-	if (rc)
-		return rc;
+	struct net_device *netdev = adapter->netdev;
+	int i, rc;
 
 	rc = set_real_num_queues(netdev);
 	if (rc)
@@ -647,7 +637,7 @@ static int ibmvnic_open(struct net_device *netdev)
 
 	rc = init_sub_crq_irqs(adapter);
 	if (rc) {
-		dev_err(dev, "failed to initialize sub crq irqs\n");
+		netdev_err(netdev, "failed to initialize sub crq irqs\n");
 		return -1;
 	}
 
@@ -659,90 +649,184 @@ static int ibmvnic_open(struct net_device *netdev)
 	adapter->napi = kcalloc(adapter->req_rx_queues,
 				sizeof(struct napi_struct), GFP_KERNEL);
 	if (!adapter->napi)
-		goto ibmvnic_open_fail;
+		return -ENOMEM;
+
 	for (i = 0; i < adapter->req_rx_queues; i++) {
 		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
 			       NAPI_POLL_WEIGHT);
-		napi_enable(&adapter->napi[i]);
 	}
 
 	send_map_query(adapter);
 
 	rc = init_rx_pools(netdev);
 	if (rc)
-		goto ibmvnic_open_fail;
+		return rc;
 
 	rc = init_tx_pools(netdev);
-	if (rc)
-		goto ibmvnic_open_fail;
+	return rc;
+}
+
+static int __ibmvnic_open(struct net_device *netdev)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	enum vnic_state prev_state = adapter->state;
+	int i, rc;
 
+	adapter->state = VNIC_OPENING;
 	replenish_pools(adapter);
 
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_enable(&adapter->napi[i]);
+
 	/* We're ready to receive frames, enable the sub-crq interrupts and
 	 * set the logical link state to up
 	 */
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		if (prev_state == VNIC_CLOSED)
+			enable_irq(adapter->rx_scrq[i]->irq);
+		else
+			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+	}
 
-	for (i = 0; i < adapter->req_tx_queues; i++)
-		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+	for (i = 0; i < adapter->req_tx_queues; i++) {
+		if (prev_state == VNIC_CLOSED)
+			enable_irq(adapter->tx_scrq[i]->irq);
+		else
+			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+	}
 
 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
-	if (rc)
-		goto ibmvnic_open_fail;
+	if (rc) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			napi_disable(&adapter->napi[i]);
+		release_resources(adapter);
+		return rc;
+	}
 
 	netif_tx_start_all_queues(netdev);
-	adapter->is_closed = false;
 
-	return 0;
+	if (prev_state == VNIC_CLOSED) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			napi_schedule(&adapter->napi[i]);
+	}
 
-ibmvnic_open_fail:
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		napi_disable(&adapter->napi[i]);
-	release_resources(adapter);
-	return -ENOMEM;
+	adapter->state = VNIC_OPEN;
+	return rc;
 }
 
-static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+static int ibmvnic_open(struct net_device *netdev)
 {
-	int i;
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int rc;
 
-	if (adapter->tx_scrq) {
-		for (i = 0; i < adapter->req_tx_queues; i++)
-			if (adapter->tx_scrq[i])
-				disable_irq(adapter->tx_scrq[i]->irq);
+	mutex_lock(&adapter->reset_lock);
+
+	if (adapter->state != VNIC_CLOSED) {
+		rc = ibmvnic_login(netdev);
+		if (rc) {
+			mutex_unlock(&adapter->reset_lock);
+			return rc;
+		}
+
+		rc = init_resources(adapter);
+		if (rc) {
+			netdev_err(netdev, "failed to initialize resources\n");
+			release_resources(adapter);
+			mutex_unlock(&adapter->reset_lock);
+			return rc;
+		}
 	}
 
-	if (adapter->rx_scrq) {
-		for (i = 0; i < adapter->req_rx_queues; i++)
-			if (adapter->rx_scrq[i])
-				disable_irq(adapter->rx_scrq[i]->irq);
+	rc = __ibmvnic_open(netdev);
+	mutex_unlock(&adapter->reset_lock);
+
+	return rc;
+}
+
+static void clean_tx_pools(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_tx_pool *tx_pool;
+	u64 tx_entries;
+	int tx_scrqs;
+	int i, j;
+
+	if (!adapter->tx_pool)
+		return;
+
+	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+	tx_entries = adapter->req_tx_entries_per_subcrq;
+
+	/* Free any remaining skbs in the tx buffer pools */
+	for (i = 0; i < tx_scrqs; i++) {
+		tx_pool = &adapter->tx_pool[i];
+		if (!tx_pool)
+			continue;
+
+		for (j = 0; j < tx_entries; j++) {
+			if (tx_pool->tx_buff[j].skb) {
+				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
+				tx_pool->tx_buff[j].skb = NULL;
+			}
+		}
 	}
 }
 
-static int ibmvnic_close(struct net_device *netdev)
+static int __ibmvnic_close(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int rc = 0;
 	int i;
 
-	adapter->closing = true;
-	disable_sub_crqs(adapter);
+	adapter->state = VNIC_CLOSING;
+	netif_tx_stop_all_queues(netdev);
 
 	if (adapter->napi) {
 		for (i = 0; i < adapter->req_rx_queues; i++)
 			napi_disable(&adapter->napi[i]);
 	}
 
-	if (!adapter->failover)
-		netif_tx_stop_all_queues(netdev);
+	clean_tx_pools(adapter);
+
+	if (adapter->tx_scrq) {
+		for (i = 0; i < adapter->req_tx_queues; i++)
+			if (adapter->tx_scrq[i]->irq)
+				disable_irq(adapter->tx_scrq[i]->irq);
+	}
 
 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+	if (rc)
+		return rc;
 
-	release_resources(adapter);
+	if (adapter->rx_scrq) {
+		for (i = 0; i < adapter->req_rx_queues; i++) {
+			int retries = 10;
+
+			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
+				retries--;
+				mdelay(100);
+
+				if (retries == 0)
+					break;
+			}
+
+			if (adapter->rx_scrq[i]->irq)
+				disable_irq(adapter->rx_scrq[i]->irq);
+		}
+	}
+
+	adapter->state = VNIC_CLOSED;
+	return rc;
+}
+
+static int ibmvnic_close(struct net_device *netdev)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int rc;
+
+	mutex_lock(&adapter->reset_lock);
+	rc = __ibmvnic_close(netdev);
+	mutex_unlock(&adapter->reset_lock);
 
-	adapter->is_closed = true;
-	adapter->closing = false;
 	return rc;
 }
 
@@ -901,13 +985,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int index = 0;
 	int ret = 0;
 
-	tx_pool = &adapter->tx_pool[queue_num];
-	tx_scrq = adapter->tx_scrq[queue_num];
-	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
-	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
-			       be32_to_cpu(adapter->login_rsp_buf->
-					   off_txsubm_subcrqs));
-	if (adapter->migrated) {
+	if (adapter->resetting) {
 		if (!netif_subqueue_stopped(netdev, skb))
 			netif_stop_subqueue(netdev, queue_num);
 		dev_kfree_skb_any(skb);
@@ -918,6 +996,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		goto out;
 	}
 
+	tx_pool = &adapter->tx_pool[queue_num];
+	tx_scrq = adapter->tx_scrq[queue_num];
+	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
+	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+
 	index = tx_pool->free_map[tx_pool->consumer_index];
 	offset = index * adapter->req_mtu;
 	dst = tx_pool->long_term_buff.buff + offset;
@@ -1099,18 +1183,185 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
-static void ibmvnic_tx_timeout(struct net_device *dev)
+/**
+ * do_reset returns zero if we are able to keep processing reset events, or
+ * non-zero if we hit a fatal error and must halt.
+ */
+static int do_reset(struct ibmvnic_adapter *adapter,
+		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
-	struct ibmvnic_adapter *adapter = netdev_priv(dev);
-	int rc;
+	struct net_device *netdev = adapter->netdev;
+	int i, rc;
+
+	netif_carrier_off(netdev);
+	adapter->reset_reason = rwi->reset_reason;
+
+	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
+		rc = ibmvnic_reenable_crq_queue(adapter);
+		if (rc)
+			return 0;
+	}
 
-	/* Adapter timed out, resetting it */
+	rc = __ibmvnic_close(netdev);
+	if (rc)
+		return rc;
+
+	/* remove the closed state so when we call open it appears
+	 * we are coming from the probed state.
+	 */
+	adapter->state = VNIC_PROBED;
+
+	release_resources(adapter);
 	release_sub_crqs(adapter);
-	rc = ibmvnic_reset_crq(adapter);
+	release_crq_queue(adapter);
+
+	rc = ibmvnic_init(adapter);
 	if (rc)
-		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
-	else
-		ibmvnic_send_crq_init(adapter);
+		return 0;
+
+	/* If the adapter was in PROBE state prior to the reset, exit here. */
+	if (reset_state == VNIC_PROBED)
+		return 0;
+
+	rc = ibmvnic_login(netdev);
+	if (rc) {
+		adapter->state = VNIC_PROBED;
+		return 0;
+	}
+
+	rtnl_lock();
+	rc = init_resources(adapter);
+	rtnl_unlock();
+	if (rc)
+		return rc;
+
+	if (reset_state == VNIC_CLOSED)
+		return 0;
+
+	rc = __ibmvnic_open(netdev);
+	if (rc) {
+		if (list_empty(&adapter->rwi_list))
+			adapter->state = VNIC_CLOSED;
+		else
+			adapter->state = reset_state;
+
+		return 0;
+	}
+
+	netif_carrier_on(netdev);
+
+	/* kick napi */
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_schedule(&adapter->napi[i]);
+
+	return 0;
+}
+
+static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_rwi *rwi;
+
+	mutex_lock(&adapter->rwi_lock);
+
+	if (!list_empty(&adapter->rwi_list)) {
+		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
+				       list);
+		list_del(&rwi->list);
+	} else {
+		rwi = NULL;
+	}
+
+	mutex_unlock(&adapter->rwi_lock);
+	return rwi;
+}
+
+static void free_all_rwi(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_rwi *rwi;
+
+	rwi = get_next_rwi(adapter);
+	while (rwi) {
+		kfree(rwi);
+		rwi = get_next_rwi(adapter);
+	}
+}
+
+static void __ibmvnic_reset(struct work_struct *work)
+{
+	struct ibmvnic_rwi *rwi;
+	struct ibmvnic_adapter *adapter;
+	struct net_device *netdev;
+	u32 reset_state;
+	int rc;
+
+	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+	netdev = adapter->netdev;
+
+	mutex_lock(&adapter->reset_lock);
+	adapter->resetting = true;
+	reset_state = adapter->state;
+
+	rwi = get_next_rwi(adapter);
+	while (rwi) {
+		rc = do_reset(adapter, rwi, reset_state);
+		kfree(rwi);
+		if (rc)
+			break;
+
+		rwi = get_next_rwi(adapter);
+	}
+
+	if (rc) {
+		free_all_rwi(adapter);
+		return;
+	}
+
+	adapter->resetting = false;
+	mutex_unlock(&adapter->reset_lock);
+}
+
+static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
+			  enum ibmvnic_reset_reason reason)
+{
+	struct ibmvnic_rwi *rwi, *tmp;
+	struct net_device *netdev = adapter->netdev;
+	struct list_head *entry;
+
+	if (adapter->state == VNIC_REMOVING ||
+	    adapter->state == VNIC_REMOVED) {
+		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
+		return;
+	}
+
+	mutex_lock(&adapter->rwi_lock);
+
+	list_for_each(entry, &adapter->rwi_list) {
+		tmp = list_entry(entry, struct ibmvnic_rwi, list);
+		if (tmp->reset_reason == reason) {
+			netdev_err(netdev, "Matching reset found, skipping\n");
+			mutex_unlock(&adapter->rwi_lock);
+			return;
+		}
+	}
+
+	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+	if (!rwi) {
+		mutex_unlock(&adapter->rwi_lock);
+		ibmvnic_close(netdev);
+		return;
+	}
+
+	rwi->reset_reason = reason;
+	list_add_tail(&rwi->list, &adapter->rwi_list);
+	mutex_unlock(&adapter->rwi_lock);
+	schedule_work(&adapter->ibmvnic_reset);
+}
+
+static void ibmvnic_tx_timeout(struct net_device *dev)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(dev);
+
+	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
 }
 
 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
@@ -1153,7 +1404,7 @@ restart_poll:
 			/* free the entry */
 			next->rx_comp.first = 0;
 			remove_buff_from_pool(adapter, rx_buff);
-			break;
+			continue;
 		}
 
 		length = be32_to_cpu(next->rx_comp.len);
@@ -1177,6 +1428,7 @@ restart_poll:
 
 		skb_put(skb, length);
 		skb->protocol = eth_type_trans(skb, netdev);
+		skb_record_rx_queue(skb, scrq_num);
 
 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
@@ -1557,19 +1809,8 @@ restart_loop:
 		}
 
 		if (txbuff->last_frag) {
-			if (atomic_sub_return(next->tx_comp.num_comps,
-					      &scrq->used) <=
-			    (adapter->req_tx_entries_per_subcrq / 2) &&
-			    netif_subqueue_stopped(adapter->netdev,
-						   txbuff->skb)) {
-				netif_wake_subqueue(adapter->netdev,
-						    scrq->pool_index);
-				netdev_dbg(adapter->netdev,
-					   "Started queue %d\n",
-					   scrq->pool_index);
-			}
-
 			dev_kfree_skb_any(txbuff->skb);
+			txbuff->skb = NULL;
 		}
 
 		adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
@@ -1580,6 +1821,15 @@ restart_loop:
 		}
 		/* remove tx_comp scrq*/
 		next->tx_comp.first = 0;
+
+		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
+		    (adapter->req_tx_entries_per_subcrq / 2) &&
+		    __netif_subqueue_stopped(adapter->netdev,
+					     scrq->pool_index)) {
+			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
+			netdev_info(adapter->netdev, "Started queue %d\n",
+				    scrq->pool_index);
+		}
 	}
 
 	enable_scrq_irq(adapter, scrq);
@@ -1853,7 +2103,8 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
 {
 	union sub_crq *entry = &scrq->msgs[scrq->cur];
 
-	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
+	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
+	    adapter->state == VNIC_CLOSING)
 		return 1;
 	else
 		return 0;
@@ -1991,18 +2242,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
 	return ibmvnic_send_crq(adapter, &crq);
 }
 
-static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
-{
-	union ibmvnic_crq crq;
-
-	memset(&crq, 0, sizeof(crq));
-	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
-	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
-	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
-
-	return ibmvnic_send_crq(adapter, &crq);
-}
-
 static int send_version_xchg(struct ibmvnic_adapter *adapter)
 {
 	union ibmvnic_crq crq;
@@ -2500,6 +2739,9 @@ static void handle_error_indication(union ibmvnic_crq *crq,
 
 	if (be32_to_cpu(crq->error_indication.error_id))
 		request_error_information(adapter, crq);
+
+	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
+		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 }
 
 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2888,26 +3130,6 @@ out:
 	}
 }
 
-static void ibmvnic_xport_event(struct work_struct *work)
-{
-	struct ibmvnic_adapter *adapter = container_of(work,
-						       struct ibmvnic_adapter,
-						       ibmvnic_xport);
-	struct device *dev = &adapter->vdev->dev;
-	long rc;
-
-	release_sub_crqs(adapter);
-	if (adapter->migrated) {
-		rc = ibmvnic_reenable_crq_queue(adapter);
-		if (rc)
-			dev_err(dev, "Error after enable rc=%ld\n", rc);
-		adapter->migrated = false;
-		rc = ibmvnic_send_crq_init(adapter);
-		if (rc)
-			dev_err(dev, "Error sending init rc=%ld\n", rc);
-	}
-}
-
 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			       struct ibmvnic_adapter *adapter)
 {
@@ -2925,12 +3147,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 	switch (gen_crq->cmd) {
 	case IBMVNIC_CRQ_INIT:
 		dev_info(dev, "Partner initialized\n");
-		/* Send back a response */
-		rc = ibmvnic_send_crq_init_complete(adapter);
-		if (!rc)
-			schedule_work(&adapter->vnic_crq_init);
-		else
-			dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
 		break;
 	case IBMVNIC_CRQ_INIT_COMPLETE:
 		dev_info(dev, "Partner initialization complete\n");
@@ -2941,19 +3157,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		}
 		return;
 	case IBMVNIC_CRQ_XPORT_EVENT:
+		netif_carrier_off(netdev);
 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
-			dev_info(dev, "Re-enabling adapter\n");
-			adapter->migrated = true;
-			schedule_work(&adapter->ibmvnic_xport);
+			dev_info(dev, "Migrated, re-enabling adapter\n");
+			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
 			dev_info(dev, "Backing device failover detected\n");
-			netif_carrier_off(netdev);
-			adapter->failover = true;
+			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
 		} else {
 			/* The adapter lost the connection */
 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
 				gen_crq->cmd);
-			schedule_work(&adapter->ibmvnic_xport);
+			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 		}
 		return;
 	case IBMVNIC_CRQ_CMD_RSP:
@@ -3234,64 +3449,6 @@ map_failed:
 	return retrc;
 }
 
-static void handle_crq_init_rsp(struct work_struct *work)
-{
-	struct ibmvnic_adapter *adapter = container_of(work,
-						       struct ibmvnic_adapter,
-						       vnic_crq_init);
-	struct device *dev = &adapter->vdev->dev;
-	struct net_device *netdev = adapter->netdev;
-	unsigned long timeout = msecs_to_jiffies(30000);
-	bool restart = false;
-	int rc;
-
-	if (adapter->failover) {
-		release_sub_crqs(adapter);
-		if (netif_running(netdev)) {
-			netif_tx_disable(netdev);
-			ibmvnic_close(netdev);
-			restart = true;
-		}
-	}
-
-	reinit_completion(&adapter->init_done);
-	send_version_xchg(adapter);
-	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
-		dev_err(dev, "Passive init timeout\n");
-		goto task_failed;
-	}
-
-	netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
-	if (adapter->failover) {
-		adapter->failover = false;
-		if (restart) {
-			rc = ibmvnic_open(netdev);
-			if (rc)
-				goto restart_failed;
-		}
-		netif_carrier_on(netdev);
-		return;
-	}
-
-	rc = register_netdev(netdev);
-	if (rc) {
-		dev_err(dev,
-			"failed to register netdev rc=%d\n", rc);
-		goto register_failed;
-	}
-	dev_info(dev, "ibmvnic registered\n");
-
-	return;
-
-restart_failed:
-	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
-register_failed:
-	release_sub_crqs(adapter);
-task_failed:
-	dev_err(dev, "Passive initialization was not successful\n");
-}
-
 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 {
 	struct device *dev = &adapter->vdev->dev;
@@ -3346,10 +3503,10 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		return -ENOMEM;
 
 	adapter = netdev_priv(netdev);
+	adapter->state = VNIC_PROBING;
 	dev_set_drvdata(&dev->dev, netdev);
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
-	adapter->failover = false;
 
 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -3358,14 +3515,17 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
-	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
-	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
-
 	spin_lock_init(&adapter->stats_lock);
 
 	INIT_LIST_HEAD(&adapter->errors);
 	spin_lock_init(&adapter->error_list_lock);
 
+	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+	INIT_LIST_HEAD(&adapter->rwi_list);
+	mutex_init(&adapter->reset_lock);
+	mutex_init(&adapter->rwi_lock);
+	adapter->resetting = false;
+
 	rc = ibmvnic_init(adapter);
 	if (rc) {
 		free_netdev(netdev);
@@ -3373,7 +3533,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
-	adapter->is_closed = false;
 
 	rc = register_netdev(netdev);
 	if (rc) {
@@ -3383,6 +3542,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 	dev_info(&dev->dev, "ibmvnic registered\n");
 
+	adapter->state = VNIC_PROBED;
 	return 0;
 }
 
@@ -3391,12 +3551,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
+	adapter->state = VNIC_REMOVING;
 	unregister_netdev(netdev);
+	mutex_lock(&adapter->reset_lock);
 
 	release_resources(adapter);
 	release_sub_crqs(adapter);
 	release_crq_queue(adapter);
 
+	adapter->state = VNIC_REMOVED;
+
+	mutex_unlock(&adapter->reset_lock);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);