@@ -513,115 +513,6 @@ static void liquidio_deinit_pci(void)
 	pci_unregister_driver(&liquidio_pci_driver);
 }
 
-/**
- * \brief Stop Tx queues
- * @param netdev network device
- */
-static inline void txqs_stop(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
-	} else {
-		netif_stop_queue(netdev);
-	}
-}
-
-/**
- * \brief Start Tx queues
- * @param netdev network device
- */
-static inline void txqs_start(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_start_subqueue(netdev, i);
-	} else {
-		netif_start_queue(netdev);
-	}
-}
-
-/**
- * \brief Wake Tx queues
- * @param netdev network device
- */
-static inline void txqs_wake(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++) {
-			int qno = lio->linfo.txpciq[i %
-				lio->oct_dev->num_iqs].s.q_no;
-
-			if (__netif_subqueue_stopped(netdev, i)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
-							  tx_restart, 1);
-				netif_wake_subqueue(netdev, i);
-			}
-		}
-	} else {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		netif_wake_queue(netdev);
-	}
-}
-
-/**
- * \brief Stop Tx queue
- * @param netdev network device
- */
-static void stop_txq(struct net_device *netdev)
-{
-	txqs_stop(netdev);
-}
-
-/**
- * \brief Start Tx queue
- * @param netdev network device
- */
-static void start_txq(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (lio->linfo.link.s.link_up) {
-		txqs_start(netdev);
-		return;
-	}
-}
-
-/**
- * \brief Wake a queue
- * @param netdev network device
- * @param q which queue to wake
- */
-static inline void wake_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_wake_subqueue(netdev, q);
-	else
-		netif_wake_queue(netdev);
-}
-
-/**
- * \brief Stop a queue
- * @param netdev network device
- * @param q which queue to stop
- */
-static inline void stop_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_stop_subqueue(netdev, q);
-	else
-		netif_stop_queue(netdev);
-}
-
 /**
  * \brief Check Tx queue status, and take appropriate action
  * @param lio per-network private data
@@ -629,33 +520,24 @@ static inline void stop_q(struct net_device *netdev, int q)
  */
 static inline int check_txq_status(struct lio *lio)
 {
+	int numqs = lio->netdev->num_tx_queues;
 	int ret_val = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		int numqs = lio->netdev->num_tx_queues;
-		int q, iq = 0;
-
-		/* check each sub-queue state */
-		for (q = 0; q < numqs; q++) {
-			iq = lio->linfo.txpciq[q %
-				lio->oct_dev->num_iqs].s.q_no;
-			if (octnet_iq_is_full(lio->oct_dev, iq))
-				continue;
-			if (__netif_subqueue_stopped(lio->netdev, q)) {
-				wake_q(lio->netdev, q);
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
-							  tx_restart, 1);
-				ret_val++;
-			}
+	/* check each sub-queue state */
+	for (q = 0; q < numqs; q++) {
+		iq = lio->linfo.txpciq[q %
+			lio->oct_dev->num_iqs].s.q_no;
+		if (octnet_iq_is_full(lio->oct_dev, iq))
+			continue;
+		if (__netif_subqueue_stopped(lio->netdev, q)) {
+			netif_wake_subqueue(lio->netdev, q);
+			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+						  tx_restart, 1);
+			ret_val++;
 		}
-	} else {
-		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
-			return 0;
-		wake_q(lio->netdev, lio->txq);
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		ret_val = 1;
 	}
+
 	return ret_val;
 }
 
@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev,
 	if (lio->linfo.link.s.link_up) {
 		dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
 		netif_carrier_on(netdev);
-		txqs_wake(netdev);
+		wake_txqs(netdev);
 	} else {
 		dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
 		netif_carrier_off(netdev);
-		stop_txq(netdev);
+		stop_txqs(netdev);
 	}
 	if (lio->linfo.link.s.mtu != current_max_mtu) {
 		netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
@@ -1752,16 +1634,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
 	return 0;
 }
 
-static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
-{
-	int q = 0;
-
-	if (netif_is_multiqueue(lio->netdev))
-		q = skb->queue_mapping % lio->linfo.num_txpciq;
-
-	return q;
-}
-
 /**
  * \brief Check Tx queue state for a given network buffer
  * @param lio per-network private data
@@ -1769,22 +1641,17 @@ static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
  */
 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0, iq = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
-	} else {
-		iq = lio->txq;
-		q = iq;
-	}
+	q = skb->queue_mapping;
+	iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
 
 	if (__netif_subqueue_stopped(lio->netdev, q)) {
 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
-		wake_q(lio->netdev, q);
+		netif_wake_subqueue(lio->netdev, q);
 	}
 	return 1;
 }
@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev)
 		return -1;
 	}
 
-	start_txq(netdev);
+	start_txqs(netdev);
 
 	/* tell Octeon to start forwarding packets to host */
 	send_rx_ctrl_cmd(lio, 1);
@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
-	if (netif_is_multiqueue(netdev)) {
-		q_idx = skb->queue_mapping;
-		q_idx = (q_idx % (lio->linfo.num_txpciq));
-		tag = q_idx;
-		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
-	} else {
-		iq_no = lio->txq;
-	}
+	q_idx = skb_iq(lio, skb);
+	tag = q_idx;
+	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 
 	stats = &oct->instr_queue[iq_no]->stats;
 
@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	ndata.q_no = iq_no;
 
-	if (netif_is_multiqueue(netdev)) {
-		if (octnet_iq_is_full(oct, ndata.q_no)) {
-			/* defer sending if queue is full */
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			stats->tx_iq_busy++;
-			return NETDEV_TX_BUSY;
-		}
-	} else {
-		if (octnet_iq_is_full(oct, lio->txq)) {
-			/* defer sending if queue is full */
-			stats->tx_iq_busy++;
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   lio->txq);
-			return NETDEV_TX_BUSY;
-		}
+	if (octnet_iq_is_full(oct, ndata.q_no)) {
+		/* defer sending if queue is full */
+		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+			   ndata.q_no);
+		stats->tx_iq_busy++;
+		return NETDEV_TX_BUSY;
 	}
+
 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
 	 */
@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
 	if (status == IQ_SEND_STOP)
-		stop_q(netdev, q_idx);
+		netif_stop_subqueue(netdev, q_idx);
 
 	netif_trans_update(netdev);
 
@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
 		   netdev->stats.tx_dropped);
 	netif_trans_update(netdev);
-	txqs_wake(netdev);
+	wake_txqs(netdev);
 }
 
 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
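
Note: the replacement calls used in the hunks above (wake_txqs(), stop_txqs(), start_txqs(), skb_iq()) are not defined in this file; they are assumed to be shared multiqueue-only helpers provided elsewhere in the driver (for example a common header such as octeon_network.h). The sketch below is inferred from the single-queue/multiqueue variants removed above and is shown only for reference; it is not part of the patch.

/*
 * Sketch (assumption): shared helpers matching the calls in the hunks
 * above, derived from the removed per-file versions with the
 * netif_is_multiqueue() branches dropped.
 */
static inline void stop_txqs(struct net_device *netdev)
{
	int i;

	/* stop every Tx subqueue of the netdev */
	for (i = 0; i < netdev->num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
}

static inline void start_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i;

	/* only start the subqueues while the link is up */
	if (lio->linfo.link.s.link_up)
		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
}

static inline void wake_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i, qno;

	/* wake any stopped subqueue and count the restart per IQ */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

		if (__netif_subqueue_stopped(netdev, i)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, i);
		}
	}
}

/* Map an skb's queue_mapping onto a valid txpciq index. */
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	return skb->queue_mapping % lio->linfo.num_txpciq;
}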