@@ -441,11 +441,15 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	u32 flags, unmap_cons;
-	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+
+	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 
-	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
+		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 		return 0;
+	}
 
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -455,10 +459,10 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		packets++;
 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
-		if (qid0 == cmpl->rxq_id)
-			rcb = ccb->rcb[0];
-		else
+		if (bna_is_small_rxq(cmpl->rxq_id))
 			rcb = ccb->rcb[1];
+		else
+			rcb = ccb->rcb[0];
 
 		unmap_q = rcb->unmap_q;
 		unmap_array = unmap_q->unmap_array;
@@ -518,12 +522,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		if (flags & BNA_CQ_EF_VLAN)
 			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
 
-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-			struct bnad_rx_ctrl *rx_ctrl;
-
-			rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
+		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 			napi_gro_receive(&rx_ctrl->napi, skb);
-		} else {
+		else {
 			netif_receive_skb(skb);
 		}
@@ -545,6 +546,8 @@ next:
 		bna_ib_ack(ccb->i_dbell, 0);
 	}
 
+	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+
 	return packets;
 }
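Note: the BNAD_FP_IN_RX_PATH bit introduced above is a busy marker for the NAPI fast path: bnad_poll_cq() sets it on entry and clears it on every exit, while the Rx teardown side (bnad_cb_rx_cleanup(), later in this patch) spins until the bit drops before handing the Rx back to BNA. A minimal sketch of the handshake, with stand-in names (not driver code):

	/* poll side: mark the fast path busy for its whole lifetime */
	static int poll_sketch(unsigned long *flags)
	{
		set_bit(0, flags);	/* 0 stands in for BNAD_FP_IN_RX_PATH */
		/* ... consume completions ... */
		clear_bit(0, flags);	/* every return path clears it */
		return 0;
	}

	/* cleanup side: wait until no poll is inside the fast path */
	static void cleanup_sketch(unsigned long *flags)
	{
		while (test_bit(0, flags))
			cpu_relax();
		/* safe to tear down Rx resources now */
	}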
@@ -611,7 +614,7 @@ bnad_msix_mbox_handler(int irq, void *data)
 
 	bna_intr_status_get(&bnad->bna, intr_status);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -628,6 +631,7 @@ bnad_isr(int irq, void *data)
 	struct bnad *bnad = (struct bnad *)data;
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
+	struct bna_tcb *tcb = NULL;
 
 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
 		return IRQ_NONE;
@@ -639,7 +643,7 @@ bnad_isr(int irq, void *data)
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -650,8 +654,11 @@ bnad_isr(int irq, void *data)
 	/* Process data interrupts */
 	/* Tx processing */
 	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++)
-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+		for (j = 0; j < bnad->num_txq_per_tx; j++) {
+			tcb = bnad->tx_info[i].tcb[j];
+			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+		}
 	}
 	/* Rx processing */
 	for (i = 0; i < bnad->num_rx; i++) {
@@ -706,43 +713,49 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
 
 /* Callbacks */
 void
-bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_enable(struct bnad *bnad)
 {
 	bnad_enable_mbox_irq(bnad);
 }
 
 void
-bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_disable(struct bnad *bnad)
 {
 	bnad_disable_mbox_irq(bnad);
 }
 
 void
-bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_ready(struct bnad *bnad)
+{
+	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
+	complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_ioceth_failed(struct bnad *bnad)
 {
+	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
 	complete(&bnad->bnad_completions.ioc_comp);
-	bnad->bnad_completions.ioc_comp_status = status;
 }
 
 void
-bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_disabled(struct bnad *bnad)
 {
+	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 	complete(&bnad->bnad_completions.ioc_comp);
-	bnad->bnad_completions.ioc_comp_status = status;
 }
 
 static void
-bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+bnad_cb_enet_disabled(void *arg)
 {
 	struct bnad *bnad = (struct bnad *)arg;
 
-	complete(&bnad->bnad_completions.port_comp);
-
 	netif_carrier_off(bnad->netdev);
+	complete(&bnad->bnad_completions.enet_comp);
 }
 
 void
-bnad_cb_port_link_status(struct bnad *bnad,
+bnad_cb_ethport_link_status(struct bnad *bnad,
 			enum bna_link_status link_status)
 {
 	bool link_up = 0;
@@ -750,34 +763,60 @@ bnad_cb_port_link_status(struct bnad *bnad,
 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 
 	if (link_status == BNA_CEE_UP) {
+		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+			BNAD_UPDATE_CTR(bnad, cee_toggle);
 		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
-		BNAD_UPDATE_CTR(bnad, cee_up);
-	} else
+	} else {
+		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+			BNAD_UPDATE_CTR(bnad, cee_toggle);
 		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+	}
 
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
-			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
-			if (!tcb)
-				return;
-			pr_warn("bna: %s link up\n",
+			uint tx_id, tcb_id;
+			printk(KERN_WARNING "bna: %s link up\n",
 				bnad->netdev->name);
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
-			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
-				/* Force an immediate Transmit Schedule */
-				pr_info("bna: %s TX_STARTED\n",
-					bnad->netdev->name);
-				netif_wake_queue(bnad->netdev);
-				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-			} else {
-				netif_stop_queue(bnad->netdev);
-				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
+				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
+				      tcb_id++) {
+					struct bna_tcb *tcb =
+					bnad->tx_info[tx_id].tcb[tcb_id];
+					u32 txq_id;
+					if (!tcb)
+						continue;
+
+					txq_id = tcb->id;
+
+					if (test_bit(BNAD_TXQ_TX_STARTED,
+						     &tcb->flags)) {
+						/*
+						 * Force an immediate
+						 * Transmit Schedule */
+						printk(KERN_INFO "bna: %s %d "
+						      "TXQ_STARTED\n",
+						       bnad->netdev->name,
+						       txq_id);
+						netif_wake_subqueue(
+								bnad->netdev,
+								txq_id);
+						BNAD_UPDATE_CTR(bnad,
+							netif_queue_wakeup);
+					} else {
+						netif_stop_subqueue(
+								bnad->netdev,
+								txq_id);
+						BNAD_UPDATE_CTR(bnad,
+							netif_queue_stop);
+					}
+				}
 			}
 		}
 	} else {
 		if (netif_carrier_ok(bnad->netdev)) {
-			pr_warn("bna: %s link down\n",
+			printk(KERN_WARNING "bna: %s link down\n",
 				bnad->netdev->name);
 			netif_carrier_off(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
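Note: link-up handling now walks every TCB of every Tx object and wakes or stops the matching netdev subqueue, instead of toggling the one global queue. This presumes one kernel Tx queue per hardware TXQ with txq_id == tcb->id (i.e. a multiqueue netdev; how those queues are allocated is not shown in this hunk). Condensed, the per-queue logic is:

	/* sketch: per-subqueue wake/stop on carrier-on (assumes a
	 * multiqueue netdev with one Tx queue per TCB) */
	static void sync_tx_queues(struct net_device *dev,
				   struct bna_tcb **tcb, int num_txq)
	{
		int i;

		for (i = 0; i < num_txq; i++) {
			if (!tcb[i])
				continue;
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb[i]->flags))
				netif_wake_subqueue(dev, tcb[i]->id);
			else
				netif_stop_subqueue(dev, tcb[i]->id);
		}
	}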
@@ -786,8 +825,7 @@ bnad_cb_port_link_status(struct bnad *bnad,
 }
 
 static void
-bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
-			enum bna_cb_status status)
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
 {
 	struct bnad *bnad = (struct bnad *)arg;
@@ -864,108 +902,166 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
 }
 
 static void
-bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
 {
 	struct bnad_tx_info *tx_info =
-			(struct bnad_tx_info *)tcb->txq->tx->priv;
-
-	if (tx_info != &bnad->tx_info[0])
-		return;
+			(struct bnad_tx_info *)tx->priv;
+	struct bna_tcb *tcb;
+	u32 txq_id;
+	int i;
 
-	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
-	netif_stop_queue(bnad->netdev);
-	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+		txq_id = tcb->id;
+		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+		netif_stop_subqueue(bnad->netdev, txq_id);
+		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
+			bnad->netdev->name, txq_id);
+	}
 }
 
 static void
-bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 {
-	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+	struct bna_tcb *tcb;
+	struct bnad_unmap_q *unmap_q;
+	u32 txq_id;
+	int i;
 
-	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-		return;
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+		txq_id = tcb->id;
 
-	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
+		unmap_q = tcb->unmap_q;
 
-	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		cpu_relax();
+		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+			continue;
 
-	bnad_free_all_txbufs(bnad, tcb);
+		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+			cpu_relax();
 
-	unmap_q->producer_index = 0;
-	unmap_q->consumer_index = 0;
+		bnad_free_all_txbufs(bnad, tcb);
 
-	smp_mb__before_clear_bit();
-	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+		unmap_q->producer_index = 0;
+		unmap_q->consumer_index = 0;
+
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+		if (netif_carrier_ok(bnad->netdev)) {
+			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
+				bnad->netdev->name, txq_id);
+			netif_wake_subqueue(bnad->netdev, txq_id);
+			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+		}
+	}
 
 	/*
-	 * Workaround for first device enable failure & we
+	 * Workaround for first ioceth enable failure & we
 	 * get a 0 MAC address. We try to get the MAC address
 	 * again here.
 	 */
 	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
-		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
 		bnad_set_netdev_perm_addr(bnad);
 	}
-
-	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
-
-	if (netif_carrier_ok(bnad->netdev)) {
-		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
-		netif_wake_queue(bnad->netdev);
-		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-	}
 }
 
 static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
-	/* Delay only once for the whole Tx Path Shutdown */
-	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
-		mdelay(BNAD_TXRX_SYNC_MDELAY);
+	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+	struct bna_tcb *tcb;
+	int i;
+
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+	}
+
+	mdelay(BNAD_TXRX_SYNC_MDELAY);
+	bna_tx_cleanup_complete(tx);
 }
 
 static void
-bnad_cb_rx_cleanup(struct bnad *bnad,
-			struct bna_ccb *ccb)
+bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
-	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+	struct bna_ccb *ccb;
+	struct bnad_rx_ctrl *rx_ctrl;
+	int i;
+
+	mdelay(BNAD_TXRX_SYNC_MDELAY);
+
+	for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+		rx_ctrl = &rx_info->rx_ctrl[i];
+		ccb = rx_ctrl->ccb;
+		if (!ccb)
+			continue;
+
+		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+		if (ccb->rcb[1])
+			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
 
-	if (ccb->rcb[1])
-		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
+			cpu_relax();
+	}
 
-	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
-		mdelay(BNAD_TXRX_SYNC_MDELAY);
+	bna_rx_cleanup_complete(rx);
 }
 
 static void
-bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
 {
-	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
-
-	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
-
-	if (rcb == rcb->cq->ccb->rcb[0])
-		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+	struct bna_ccb *ccb;
+	struct bna_rcb *rcb;
+	struct bnad_rx_ctrl *rx_ctrl;
+	struct bnad_unmap_q *unmap_q;
+	int i;
+	int j;
 
-	bnad_free_all_rxbufs(bnad, rcb);
+	for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+		rx_ctrl = &rx_info->rx_ctrl[i];
+		ccb = rx_ctrl->ccb;
+		if (!ccb)
+			continue;
 
-	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+		bnad_cq_cmpl_init(bnad, ccb);
 
-	/* Now allocate & post buffers for this RCB */
-	/* !!Allocation in callback context */
-	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
-		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
-			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-			bnad_alloc_n_post_rxbufs(bnad, rcb);
-		smp_mb__before_clear_bit();
-		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
+			rcb = ccb->rcb[j];
+			if (!rcb)
+				continue;
+			bnad_free_all_rxbufs(bnad, rcb);
+
+			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+			unmap_q = rcb->unmap_q;
+
+			/* Now allocate & post buffers for this RCB */
+			/* !!Allocation in callback context */
+			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+					bnad_alloc_n_post_rxbufs(bnad, rcb);
+				smp_mb__before_clear_bit();
+				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+			}
+		}
 	}
 }
 
 static void
-bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
-				enum bna_cb_status status)
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
 {
 	struct bnad *bnad = (struct bnad *)arg;
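Note: BNAD_TXQ_FREE_SENT is used throughout the resume/transmit/cleanup paths as a one-bit lock around buffer reclamation: test_and_set_bit() acquires it (and implies a full memory barrier), and smp_mb__before_clear_bit() orders the queue-index resets before the releasing clear_bit(). Reduced to its skeleton, with stand-in names:

	/* sketch of the one-bit lock; bit 0 stands in for
	 * BNAD_TXQ_FREE_SENT */
	static void reclaim_sketch(unsigned long *flags, u32 *prod, u32 *cons)
	{
		while (test_and_set_bit(0, flags))	/* acquire */
			cpu_relax();

		*prod = 0;	/* critical section: reset queue indices */
		*cons = 0;

		smp_mb__before_clear_bit();	/* order stores before release */
		clear_bit(0, flags);		/* release */
	}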
@@ -973,10 +1069,9 @@ bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
 }
 
 static void
-bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
-				enum bna_cb_status status)
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
 {
-	bnad->bnad_completions.mcast_comp_status = status;
+	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
 	complete(&bnad->bnad_completions.mcast_comp);
 }
 
@@ -995,6 +1090,13 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
 		jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
 }
 
+static void
+bnad_cb_enet_mtu_set(struct bnad *bnad)
+{
+	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
+	complete(&bnad->bnad_completions.mtu_comp);
+}
+
 /* Resource allocation, free functions */
 
 static void
@@ -1073,23 +1175,17 @@ err_return:
 
 /* Free IRQ for Mailbox */
 static void
-bnad_mbox_irq_free(struct bnad *bnad,
-		   struct bna_intr_info *intr_info)
+bnad_mbox_irq_free(struct bnad *bnad)
 {
 	int irq;
 	unsigned long flags;
 
-	if (intr_info->idl == NULL)
-		return;
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bnad_disable_mbox_irq(bnad);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
 	free_irq(irq, bnad);
-
-	kfree(intr_info->idl);
 }
 
 /*
@@ -1098,32 +1194,22 @@ bnad_mbox_irq_free(struct bnad *bnad,
  * from bna
  */
 static int
-bnad_mbox_irq_alloc(struct bnad *bnad,
-		    struct bna_intr_info *intr_info)
+bnad_mbox_irq_alloc(struct bnad *bnad)
 {
 	int err = 0;
 	unsigned long irq_flags, flags;
 	u32 irq;
 	irq_handler_t irq_handler;
 
-	/* Mbox should use only 1 vector */
-
-	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
-	if (!intr_info->idl)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
 		irq_flags = 0;
-		intr_info->intr_type = BNA_INTR_T_MSIX;
-		intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
 	} else {
 		irq_handler = (irq_handler_t)bnad_isr;
 		irq = bnad->pcidev->irq;
 		irq_flags = IRQF_SHARED;
-		intr_info->intr_type = BNA_INTR_T_INTX;
 	}
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1140,11 +1226,6 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 	err = request_irq(irq, irq_handler, irq_flags,
 			  bnad->mbox_irq_name, bnad);
 
-	if (err) {
-		kfree(intr_info->idl);
-		intr_info->idl = NULL;
-	}
-
 	return err;
 }
 
@@ -1158,7 +1239,7 @@ bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
 static int
 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
-		    uint txrx_id, struct bna_intr_info *intr_info)
+		    u32 txrx_id, struct bna_intr_info *intr_info)
 {
 	int i, vector_start = 0;
 	u32 cfg_flags;
@@ -1241,7 +1322,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
  */
 static int
 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
-			uint tx_id, int num_txqs)
+			u32 tx_id, int num_txqs)
 {
 	int i;
 	int err;
@@ -1294,7 +1375,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
  */
 static int
 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
-			uint rx_id, int num_rxps)
+			u32 rx_id, int num_rxps)
 {
 	int i;
 	int err;
@@ -1338,7 +1419,7 @@ bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
 /* Allocates memory and interrupt resources for Tx object */
 static int
 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
-		  uint tx_id)
+		  u32 tx_id)
 {
 	int i, err = 0;
@@ -1407,7 +1488,7 @@ bnad_ioc_timeout(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1418,7 +1499,7 @@ bnad_ioc_hb_check(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1429,7 +1510,7 @@ bnad_iocpf_timeout(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1440,7 +1521,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1499,7 +1580,7 @@ bnad_stats_timeout(unsigned long data)
 		return;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_stats_get(&bnad->bna);
+	bna_hw_stats_get(&bnad->bna);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -1632,7 +1713,7 @@ bnad_napi_disable(struct bnad *bnad, u32 rx_id)
 
 /* Should be held with conf_lock held */
 void
-bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
 {
 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1656,6 +1737,7 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	tx_info->tx = NULL;
+	tx_info->tx_id = 0;
 
 	if (0 == tx_id)
 		tasklet_kill(&bnad->tx_free_tasklet);
@@ -1665,7 +1747,7 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
 
 /* Should be held with conf_lock held */
 int
-bnad_setup_tx(struct bnad *bnad, uint tx_id)
+bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 {
 	int err;
 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
@@ -1677,10 +1759,13 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 	struct bna_tx *tx;
 	unsigned long flags;
 
+	tx_info->tx_id = tx_id;
+
 	/* Initialize the Tx object configuration */
 	tx_config->num_txq = bnad->num_txq_per_tx;
 	tx_config->txq_depth = bnad->txq_depth;
 	tx_config->tx_type = BNA_TX_T_REGULAR;
+	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
 
 	/* Initialize the tx event handlers */
 	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
|
|
@@ -1741,14 +1826,15 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
|
|
|
{
|
|
|
rx_config->rx_type = BNA_RX_T_REGULAR;
|
|
|
rx_config->num_paths = bnad->num_rxp_per_rx;
|
|
|
+ rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
|
|
|
|
|
|
if (bnad->num_rxp_per_rx > 1) {
|
|
|
rx_config->rss_status = BNA_STATUS_T_ENABLED;
|
|
|
rx_config->rss_config.hash_type =
|
|
|
- (BFI_RSS_T_V4_TCP |
|
|
|
- BFI_RSS_T_V6_TCP |
|
|
|
- BFI_RSS_T_V4_IP |
|
|
|
- BFI_RSS_T_V6_IP);
|
|
|
+ (BFI_ENET_RSS_IPV6 |
|
|
|
+ BFI_ENET_RSS_IPV6_TCP |
|
|
|
+ BFI_ENET_RSS_IPV4 |
|
|
|
+ BFI_ENET_RSS_IPV4_TCP);
|
|
|
rx_config->rss_config.hash_mask =
|
|
|
bnad->num_rxp_per_rx - 1;
|
|
|
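Note: hash_mask is num_rxp_per_rx - 1, so RSS spreading behaves as intended only when the number of Rx paths is a power of two (4 paths gives mask 0x3, and hash & mask selects paths 0..3); the Toeplitz key itself is simply randomized at setup. A toy illustration of the selection, under that power-of-two assumption:

	/* toy sketch: RSS path selection by masking the packet hash */
	static unsigned int pick_rx_path(u32 hash, unsigned int num_paths)
	{
		u32 hash_mask = num_paths - 1;	/* e.g. 4 -> 0x3 */

		return hash & hash_mask;	/* index 0..num_paths-1 */
	}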
get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
|
|
@@ -1768,7 +1854,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
|
|
|
|
|
|
/* Called with mutex_lock(&bnad->conf_mutex) held */
|
|
|
void
|
|
|
-bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
|
|
|
+bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
|
|
|
{
|
|
|
struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
|
|
|
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
|
|
@@ -1811,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
|
|
|
|
|
|
/* Called with mutex_lock(&bnad->conf_mutex) held */
|
|
|
int
|
|
|
-bnad_setup_rx(struct bnad *bnad, uint rx_id)
|
|
|
+bnad_setup_rx(struct bnad *bnad, u32 rx_id)
|
|
|
{
|
|
|
int err;
|
|
|
struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
|
|
@@ -1823,6 +1909,8 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
|
|
|
struct bna_rx *rx;
|
|
|
unsigned long flags;
|
|
|
|
|
|
+ rx_info->rx_id = rx_id;
|
|
|
+
|
|
|
/* Initialize the Rx object configuration */
|
|
|
bnad_init_rx_config(bnad, rx_config);
|
|
|
|
|
@@ -1978,7 +2066,7 @@ bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
|
|
|
u16 vid;
|
|
|
unsigned long flags;
|
|
|
|
|
|
- BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
|
|
|
+ BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
|
|
|
|
|
|
for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
@@ -2031,11 +2119,11 @@ bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
|
|
|
void
|
|
|
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
|
|
|
{
|
|
|
- struct bfi_ll_stats_mac *mac_stats;
|
|
|
- u64 bmap;
|
|
|
+ struct bfi_enet_stats_mac *mac_stats;
|
|
|
+ u32 bmap;
|
|
|
int i;
|
|
|
|
|
|
- mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
|
|
|
+ mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
|
|
|
stats->rx_errors =
|
|
|
mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
|
|
|
mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
|
|
@@ -2054,13 +2142,12 @@ bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
|
|
|
stats->rx_crc_errors = mac_stats->rx_fcs_error;
|
|
|
stats->rx_frame_errors = mac_stats->rx_alignment_error;
|
|
|
/* recv'r fifo overrun */
|
|
|
- bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
|
|
|
- ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
|
|
|
- for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
|
|
|
+ bmap = bna_rx_rid_mask(&bnad->bna);
|
|
|
+ for (i = 0; bmap; i++) {
|
|
|
if (bmap & 1) {
|
|
|
stats->rx_fifo_errors +=
|
|
|
bnad->stats.bna_stats->
|
|
|
- hw_stats->rxf_stats[i].frame_drops;
|
|
|
+ hw_stats.rxf_stats[i].frame_drops;
|
|
|
break;
|
|
|
}
|
|
|
bmap >>= 1;
|
|
@@ -2158,7 +2245,7 @@ bnad_q_num_init(struct bnad *bnad)
|
|
|
* Called with bnad->bna_lock held b'cos of cfg_flags access
|
|
|
*/
|
|
|
static void
|
|
|
-bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
|
|
|
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
|
|
|
{
|
|
|
bnad->num_txq_per_tx = 1;
|
|
|
if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
|
|
@@ -2171,76 +2258,72 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
|
|
|
bnad->num_rxp_per_rx = 1;
|
|
|
}
|
|
|
|
|
|
-/* Enable / disable device */
|
|
|
-static void
|
|
|
-bnad_device_disable(struct bnad *bnad)
|
|
|
+/* Enable / disable ioceth */
|
|
|
+static int
|
|
|
+bnad_ioceth_disable(struct bnad *bnad)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
-
|
|
|
- init_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
+ int err = 0;
|
|
|
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
|
|
|
+ init_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
+ bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
- wait_for_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
|
|
|
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
|
|
|
+
|
|
|
+ err = bnad->bnad_completions.ioc_comp_status;
|
|
|
+ return err;
|
|
|
}
|
|
|
|
|
|
static int
|
|
|
-bnad_device_enable(struct bnad *bnad)
|
|
|
+bnad_ioceth_enable(struct bnad *bnad)
|
|
|
{
|
|
|
int err = 0;
|
|
|
unsigned long flags;
|
|
|
|
|
|
- init_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
-
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_device_enable(&bnad->bna.device);
|
|
|
+ init_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
|
|
|
+ bna_ioceth_enable(&bnad->bna.ioceth);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
- wait_for_completion(&bnad->bnad_completions.ioc_comp);
|
|
|
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
|
|
|
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
|
|
|
|
|
|
- if (bnad->bnad_completions.ioc_comp_status)
|
|
|
- err = bnad->bnad_completions.ioc_comp_status;
|
|
|
+ err = bnad->bnad_completions.ioc_comp_status;
|
|
|
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
/* Free BNA resources */
|
|
|
static void
|
|
|
-bnad_res_free(struct bnad *bnad)
|
|
|
+bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
|
|
|
+ u32 res_val_max)
|
|
|
{
|
|
|
int i;
|
|
|
- struct bna_res_info *res_info = &bnad->res_info[0];
|
|
|
|
|
|
- for (i = 0; i < BNA_RES_T_MAX; i++) {
|
|
|
- if (res_info[i].res_type == BNA_RES_T_MEM)
|
|
|
- bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
|
|
|
- else
|
|
|
- bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
|
|
|
- }
|
|
|
+ for (i = 0; i < res_val_max; i++)
|
|
|
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
|
|
|
}
|
|
|
|
|
|
/* Allocates memory and interrupt resources for BNA */
|
|
|
static int
|
|
|
-bnad_res_alloc(struct bnad *bnad)
|
|
|
+bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
|
|
|
+ u32 res_val_max)
|
|
|
{
|
|
|
int i, err;
|
|
|
- struct bna_res_info *res_info = &bnad->res_info[0];
|
|
|
|
|
|
- for (i = 0; i < BNA_RES_T_MAX; i++) {
|
|
|
- if (res_info[i].res_type == BNA_RES_T_MEM)
|
|
|
- err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
|
|
|
- else
|
|
|
- err = bnad_mbox_irq_alloc(bnad,
|
|
|
- &res_info[i].res_u.intr_info);
|
|
|
+ for (i = 0; i < res_val_max; i++) {
|
|
|
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
|
|
|
if (err)
|
|
|
goto err_return;
|
|
|
}
|
|
|
return 0;
|
|
|
|
|
|
err_return:
|
|
|
- bnad_res_free(bnad);
|
|
|
+ bnad_res_free(bnad, res_info, res_val_max);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
@@ -2276,7 +2359,7 @@ bnad_enable_msix(struct bnad *bnad)
|
|
|
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
/* ret = #of vectors that we got */
|
|
|
- bnad_q_num_adjust(bnad, ret);
|
|
|
+ bnad_q_num_adjust(bnad, ret, 0);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
|
|
@@ -2284,6 +2367,9 @@ bnad_enable_msix(struct bnad *bnad)
|
|
|
* bnad->num_rxp_per_rx) +
|
|
|
BNAD_MAILBOX_MSIX_VECTORS;
|
|
|
|
|
|
+ if (bnad->msix_num > ret)
|
|
|
+ goto intx_mode;
|
|
|
+
|
|
|
/* Try once more with adjusted numbers */
|
|
|
/* If this fails, fall back to INTx */
|
|
|
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
|
|
@@ -2293,6 +2379,9 @@ bnad_enable_msix(struct bnad *bnad)
|
|
|
|
|
|
} else if (ret < 0)
|
|
|
goto intx_mode;
|
|
|
+
|
|
|
+ pci_intx(bnad->pcidev, 0);
|
|
|
+
|
|
|
return;
|
|
|
|
|
|
intx_mode:
|
|
@@ -2351,12 +2440,12 @@ bnad_open(struct net_device *netdev)
|
|
|
pause_config.tx_pause = 0;
|
|
|
pause_config.rx_pause = 0;
|
|
|
|
|
|
- mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
|
|
|
+ mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
|
|
|
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
|
|
|
- bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
|
|
|
- bna_port_enable(&bnad->bna.port);
|
|
|
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
|
|
|
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
|
|
|
+ bna_enet_enable(&bnad->bna.enet);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
/* Enable broadcast */
|
|
@@ -2396,14 +2485,14 @@ bnad_stop(struct net_device *netdev)
|
|
|
/* Stop the stats timer */
|
|
|
bnad_stats_timer_stop(bnad);
|
|
|
|
|
|
- init_completion(&bnad->bnad_completions.port_comp);
|
|
|
+ init_completion(&bnad->bnad_completions.enet_comp);
|
|
|
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
|
|
|
- bnad_cb_port_disabled);
|
|
|
+ bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
|
|
|
+ bnad_cb_enet_disabled);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
- wait_for_completion(&bnad->bnad_completions.port_comp);
|
|
|
+ wait_for_completion(&bnad->bnad_completions.enet_comp);
|
|
|
|
|
|
bnad_cleanup_tx(bnad, 0);
|
|
|
bnad_cleanup_rx(bnad, 0);
|
|
@@ -2425,19 +2514,18 @@ static netdev_tx_t
|
|
|
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
{
|
|
|
struct bnad *bnad = netdev_priv(netdev);
|
|
|
+ u32 txq_id = 0;
|
|
|
+ struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
|
|
|
|
|
|
u16 txq_prod, vlan_tag = 0;
|
|
|
u32 unmap_prod, wis, wis_used, wi_range;
|
|
|
u32 vectors, vect_id, i, acked;
|
|
|
- u32 tx_id;
|
|
|
int err;
|
|
|
|
|
|
- struct bnad_tx_info *tx_info;
|
|
|
- struct bna_tcb *tcb;
|
|
|
- struct bnad_unmap_q *unmap_q;
|
|
|
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
|
|
|
dma_addr_t dma_addr;
|
|
|
struct bna_txq_entry *txqent;
|
|
|
- bna_txq_wi_ctrl_flag_t flags;
|
|
|
+ u16 flags;
|
|
|
|
|
|
if (unlikely
|
|
|
(skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
|
|
@@ -2445,15 +2533,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
return NETDEV_TX_OK;
|
|
|
}
|
|
|
|
|
|
- tx_id = 0;
|
|
|
-
|
|
|
- tx_info = &bnad->tx_info[tx_id];
|
|
|
- tcb = tx_info->tcb[tx_id];
|
|
|
- unmap_q = tcb->unmap_q;
|
|
|
-
|
|
|
/*
|
|
|
* Takes care of the Tx that is scheduled between clearing the flag
|
|
|
- * and the netif_stop_queue() call.
|
|
|
+ * and the netif_stop_all_queue() call.
|
|
|
*/
|
|
|
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
|
|
|
dev_kfree_skb(skb);
|
|
@@ -2467,9 +2549,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
}
|
|
|
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
|
|
|
acked = 0;
|
|
|
- if (unlikely
|
|
|
- (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
|
|
|
- vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
|
|
|
+ if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
|
|
|
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
|
|
|
if ((u16) (*tcb->hw_consumer_index) !=
|
|
|
tcb->consumer_index &&
|
|
|
!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
|
|
@@ -2602,7 +2683,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
|
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
|
|
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
|
|
|
- u32 size = frag->size;
|
|
|
+ u16 size = frag->size;
|
|
|
|
|
|
if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
|
|
|
vect_id = 0;
|
|
@@ -2760,11 +2841,25 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
|
|
|
}
|
|
|
|
|
|
static int
|
|
|
-bnad_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
+bnad_mtu_set(struct bnad *bnad, int mtu)
|
|
|
{
|
|
|
- int mtu, err = 0;
|
|
|
unsigned long flags;
|
|
|
|
|
|
+ init_completion(&bnad->bnad_completions.mtu_comp);
|
|
|
+
|
|
|
+ spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
|
|
|
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
+
|
|
|
+ wait_for_completion(&bnad->bnad_completions.mtu_comp);
|
|
|
+
|
|
|
+ return bnad->bnad_completions.mtu_comp_status;
|
|
|
+}
|
|
|
+
|
|
|
+static int
|
|
|
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
+{
|
|
|
+ int err, mtu = netdev->mtu;
|
|
|
struct bnad *bnad = netdev_priv(netdev);
|
|
|
|
|
|
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
|
|
@@ -2774,11 +2869,10 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
|
|
|
netdev->mtu = new_mtu;
|
|
|
|
|
|
- mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
|
|
|
-
|
|
|
- spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
|
|
|
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
+ mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
|
|
|
+ err = bnad_mtu_set(bnad, mtu);
|
|
|
+ if (err)
|
|
|
+ err = -EBUSY;
|
|
|
|
|
|
mutex_unlock(&bnad->conf_mutex);
|
|
|
return err;
|
|
@@ -2968,7 +3062,7 @@ bnad_uninit(struct bnad *bnad)
|
|
|
|
|
|
/*
|
|
|
* Initialize locks
|
|
|
- a) Per device mutes used for serializing configuration
|
|
|
+ a) Per ioceth mutes used for serializing configuration
|
|
|
changes from OS interface
|
|
|
b) spin lock used to protect bna state machine
|
|
|
*/
|
|
@@ -3058,12 +3152,15 @@ bnad_pci_probe(struct pci_dev *pdev,
|
|
|
*/
|
|
|
netdev = alloc_etherdev(sizeof(struct bnad));
|
|
|
if (!netdev) {
|
|
|
- dev_err(&pdev->dev, "alloc_etherdev failed\n");
|
|
|
+ dev_err(&pdev->dev, "netdev allocation failed\n");
|
|
|
err = -ENOMEM;
|
|
|
return err;
|
|
|
}
|
|
|
bnad = netdev_priv(netdev);
|
|
|
|
|
|
+ bnad_lock_init(bnad);
|
|
|
+
|
|
|
+ mutex_lock(&bnad->conf_mutex);
|
|
|
/*
|
|
|
* PCI initialization
|
|
|
* Output : using_dac = 1 for 64 bit DMA
|
|
@@ -3073,7 +3170,6 @@ bnad_pci_probe(struct pci_dev *pdev,
|
|
|
if (err)
|
|
|
goto free_netdev;
|
|
|
|
|
|
- bnad_lock_init(bnad);
|
|
|
/*
|
|
|
* Initialize bnad structure
|
|
|
* Setup relation between pci_dev & netdev
|
|
@@ -3082,21 +3178,22 @@ bnad_pci_probe(struct pci_dev *pdev,
|
|
|
err = bnad_init(bnad, pdev, netdev);
|
|
|
if (err)
|
|
|
goto pci_uninit;
|
|
|
+
|
|
|
/* Initialize netdev structure, set up ethtool ops */
|
|
|
bnad_netdev_init(bnad, using_dac);
|
|
|
|
|
|
/* Set link to down state */
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
|
- bnad_enable_msix(bnad);
|
|
|
-
|
|
|
/* Get resource requirement form bna */
|
|
|
+ spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
bna_res_req(&bnad->res_info[0]);
|
|
|
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
/* Allocate resources from bna */
|
|
|
- err = bnad_res_alloc(bnad);
|
|
|
+ err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
|
|
|
if (err)
|
|
|
- goto free_netdev;
|
|
|
+ goto drv_uninit;
|
|
|
|
|
|
bna = &bnad->bna;
|
|
|
|
|
@@ -3106,69 +3203,102 @@ bnad_pci_probe(struct pci_dev *pdev,
|
|
|
pcidev_info.device_id = bnad->pcidev->device;
|
|
|
pcidev_info.pci_bar_kva = bnad->bar0;
|
|
|
|
|
|
- mutex_lock(&bnad->conf_mutex);
|
|
|
-
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
bnad->stats.bna_stats = &bna->stats;
|
|
|
|
|
|
+ bnad_enable_msix(bnad);
|
|
|
+ err = bnad_mbox_irq_alloc(bnad);
|
|
|
+ if (err)
|
|
|
+ goto res_free;
|
|
|
+
|
|
|
+
|
|
|
/* Set up timers */
|
|
|
- setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
|
|
|
+ setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
|
|
|
((unsigned long)bnad));
|
|
|
- setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
|
|
|
+ setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
|
|
|
((unsigned long)bnad));
|
|
|
- setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
|
|
|
+ setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
|
|
|
((unsigned long)bnad));
|
|
|
- setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
|
|
|
+ setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
|
|
|
((unsigned long)bnad));
|
|
|
|
|
|
/* Now start the timer before calling IOC */
|
|
|
- mod_timer(&bnad->bna.device.ioc.iocpf_timer,
|
|
|
+ mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
|
|
|
jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
|
|
|
|
|
|
/*
|
|
|
* Start the chip
|
|
|
- * Don't care even if err != 0, bna state machine will
|
|
|
- * deal with it
|
|
|
+ * If the call back comes with error, we bail out.
|
|
|
+ * This is a catastrophic error.
|
|
|
*/
|
|
|
- err = bnad_device_enable(bnad);
|
|
|
+ err = bnad_ioceth_enable(bnad);
|
|
|
+ if (err) {
|
|
|
+ pr_err("BNA: Initialization failed err=%d\n",
|
|
|
+ err);
|
|
|
+ goto probe_success;
|
|
|
+ }
|
|
|
+
|
|
|
+ spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
|
|
|
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
|
|
|
+ bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
|
|
|
+ bna_attr(bna)->num_rxp - 1);
|
|
|
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
|
|
|
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
|
|
|
+ err = -EIO;
|
|
|
+ }
|
|
|
+ bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
|
|
|
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
+
|
|
|
+ err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
|
|
|
+ if (err)
|
|
|
+ goto disable_ioceth;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
+ bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
|
|
|
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
/* Get the burnt-in mac */
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
- bna_port_mac_get(&bna->port, &bnad->perm_addr);
|
|
|
+ bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
|
|
|
bnad_set_netdev_perm_addr(bnad);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
|
|
|
- mutex_unlock(&bnad->conf_mutex);
|
|
|
-
|
|
|
/* Finally, reguister with net_device layer */
|
|
|
err = register_netdev(netdev);
|
|
|
if (err) {
|
|
|
pr_err("BNA : Registering with netdev failed\n");
|
|
|
- goto disable_device;
|
|
|
+ goto probe_uninit;
|
|
|
}
|
|
|
+ set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
|
|
|
|
|
|
+probe_success:
|
|
|
+ mutex_unlock(&bnad->conf_mutex);
|
|
|
return 0;
|
|
|
|
|
|
-disable_device:
|
|
|
- mutex_lock(&bnad->conf_mutex);
|
|
|
- bnad_device_disable(bnad);
|
|
|
- del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
|
|
|
- del_timer_sync(&bnad->bna.device.ioc.sem_timer);
|
|
|
- del_timer_sync(&bnad->bna.device.ioc.hb_timer);
|
|
|
+probe_uninit:
|
|
|
+ bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
|
|
|
+disable_ioceth:
|
|
|
+ bnad_ioceth_disable(bnad);
|
|
|
+ del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
|
|
|
+ del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
|
|
|
+ del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
|
|
|
spin_lock_irqsave(&bnad->bna_lock, flags);
|
|
|
bna_uninit(bna);
|
|
|
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
|
|
- mutex_unlock(&bnad->conf_mutex);
|
|
|
-
|
|
|
- bnad_res_free(bnad);
|
|
|
+ bnad_mbox_irq_free(bnad);
|
|
|
bnad_disable_msix(bnad);
|
|
|
+res_free:
|
|
|
+ bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
|
|
|
+drv_uninit:
|
|
|
+ bnad_uninit(bnad);
|
|
|
pci_uninit:
|
|
|
bnad_pci_uninit(pdev);
|
|
|
+ mutex_unlock(&bnad->conf_mutex);
|
|
|
bnad_lock_uninit(bnad);
|
|
|
- bnad_uninit(bnad);
|
|
|
free_netdev:
|
|
|
free_netdev(netdev);
|
|
|
return err;
|
|
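Note: the probe path now tears down in strict reverse order of setup through the label ladder above (probe_uninit -> disable_ioceth -> res_free -> drv_uninit -> pci_uninit -> free_netdev), and conf_mutex, taken right after netdev allocation, is dropped on every exit path. The shape of the ladder, with illustrative helpers (not driver code):

	/* sketch: acquire in order, unwind via labels in reverse order */
	static int alloc_a(void) { return 0; }
	static int alloc_b(void) { return -1; }	/* say this step fails */
	static void undo_a(void) { }

	static int probe_sketch(void)
	{
		int err;

		err = alloc_a();
		if (err)
			goto out;
		err = alloc_b();
		if (err)
			goto unwind_a;	/* free only what succeeded */
		return 0;

	unwind_a:
		undo_a();
	out:
		return err;
	}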
@@ -3189,21 +3319,24 @@ bnad_pci_remove(struct pci_dev *pdev)
 	bnad = netdev_priv(netdev);
 	bna = &bnad->bna;
 
-	unregister_netdev(netdev);
+	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
+		unregister_netdev(netdev);
 
 	mutex_lock(&bnad->conf_mutex);
-	bnad_device_disable(bnad);
-	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
-	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
-	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+	bnad_ioceth_disable(bnad);
+	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_uninit(bna);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	mutex_unlock(&bnad->conf_mutex);
 
-	bnad_res_free(bnad);
+	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+	bnad_mbox_irq_free(bnad);
 	bnad_disable_msix(bnad);
 	bnad_pci_uninit(pdev);
+	mutex_unlock(&bnad->conf_mutex);
 	bnad_lock_uninit(bnad);
 	bnad_uninit(bnad);
 	free_netdev(netdev);