@@ -36,6 +36,7 @@
 #define DRV_NAME "ucc_hdlc"
 
 #define TDM_PPPOHT_SLIC_MAXIN
+#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
 
 static struct ucc_tdm_info utdm_primary_info = {
 	.uf_info = {
@@ -430,12 +431,25 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
+{
+	u32 cecr_subblock;
+
+	cecr_subblock =
+		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
+
+	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
+	return 0;
+}
+
 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 {
 	/* Start from the next BD that should be filled */
 	struct net_device *dev = priv->ndev;
 	struct qe_bd *bd;		/* BD pointer */
 	u16 bd_status;
+	int tx_restart = 0;
 
 	bd = priv->dirty_tx;
 	bd_status = ioread16be(&bd->status);
@@ -444,6 +458,15 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 	while ((bd_status & T_R_S) == 0) {
 		struct sk_buff *skb;
 
+		if (bd_status & T_UN_S) { /* Underrun */
+			dev->stats.tx_fifo_errors++;
+			tx_restart = 1;
+		}
+		if (bd_status & T_CT_S) { /* Carrier lost */
+			dev->stats.tx_carrier_errors++;
+			tx_restart = 1;
+		}
+
 		/* BD contains already transmitted buffer.   */
 		/* Handle the transmitted buffer and release */
 		/* the BD to be used with the current frame  */
@@ -475,6 +498,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 	}
 	priv->dirty_tx = bd;
 
+	if (tx_restart)
+		hdlc_tx_restart(priv);
+
 	return 0;
 }
 
@@ -493,11 +519,22 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 
 	/* while there are received buffers and BD is full (~R_E) */
 	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
-		if (bd_status & R_OV_S)
-			dev->stats.rx_over_errors++;
-		if (bd_status & R_CR_S) {
-			dev->stats.rx_crc_errors++;
-			dev->stats.rx_dropped++;
+		if (bd_status & (RX_BD_ERRORS)) {
+			dev->stats.rx_errors++;
+
+			if (bd_status & R_CD_S)
+				dev->stats.collisions++;
+			if (bd_status & R_OV_S)
+				dev->stats.rx_fifo_errors++;
+			if (bd_status & R_CR_S)
+				dev->stats.rx_crc_errors++;
+			if (bd_status & R_AB_S)
+				dev->stats.rx_over_errors++;
+			if (bd_status & R_NO_S)
+				dev->stats.rx_frame_errors++;
+			if (bd_status & R_LG_S)
+				dev->stats.rx_length_errors++;
+
 			goto recycle;
 		}
 		bdbuffer = priv->rx_buffer +
@@ -546,7 +583,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 		netif_receive_skb(skb);
 
 recycle:
-		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
 
 		/* update to point at the next bd */
 		if (bd_status & R_W_S) {
@@ -622,7 +659,7 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
 
 	/* Errors and other events */
 	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
-		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
 		dev->stats.tx_errors++;
 