@@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 	work_done = nps_enet_rx_handler(ndev);
 	if (work_done < budget) {
 		u32 buf_int_enable_value = 0;
+		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+		u32 tx_ctrl_ct =
+			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 
 		napi_complete(napi);
 
@@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
 		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
 				 buf_int_enable_value);
+
+		/* In case we get a tx interrupt while interrupts are
+		 * masked, we will lose it, since tx is an edge interrupt.
+		 * Specifically, while executing the code section above,
+		 * between nps_enet_tx_handler and the interrupt enable, all
+		 * tx requests will be stuck until we get an rx interrupt.
+		 * The two code lines below solve this situation by
+		 * re-adding ourselves to the poll list.
+		 */
+
+		if (priv->tx_skb && !tx_ctrl_ct)
+			napi_reschedule(napi);
 	}
 
 	return work_done;
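
For reference, a minimal sketch of how nps_enet_poll() reads once both hunks are applied. It is assembled only from the hunk context above; the function prologue and the lines that build buf_int_enable_value are elided with "...", and the comments are editorial, not the driver's own:

static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	...
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;
		/* Sample TX_CTL while interrupts are still masked: a
		 * nonzero CT field means a frame is still in flight, so
		 * its tx-done edge has not fired yet.
		 */
		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
		u32 tx_ctrl_ct =
			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

		napi_complete(napi);
		...
		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* A queued skb (priv->tx_skb) with CT == 0 means the
		 * tx-done edge fired inside the masked window and was
		 * lost; reschedule ourselves instead of waiting for an
		 * unrelated rx interrupt to unstick tx.
		 */
		if (priv->tx_skb && !tx_ctrl_ct)
			napi_reschedule(napi);
	}

	return work_done;
}

The effect of the napi_reschedule() call is that the next poll pass runs the tx handler again and completes the stuck skb, which is why the register snapshot must be taken before interrupts are re-enabled rather than after.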