@@ -52,7 +52,12 @@
 				 NETIF_MSG_RX_ERR| \
 				 NETIF_MSG_TX_ERR)
 
+#define SH_ETH_OFFSET_DEFAULTS			\
+	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+
 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+	SH_ETH_OFFSET_DEFAULTS,
+
 	[EDSR]		= 0x0000,
 	[EDMR]		= 0x0400,
 	[EDTRR]		= 0x0408,
@@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_POST3]	= 0x0078,
 	[TSU_POST4]	= 0x007c,
 	[TSU_ADRH0]	= 0x0100,
-	[TSU_ADRL0]	= 0x0104,
-	[TSU_ADRH31]	= 0x01f8,
-	[TSU_ADRL31]	= 0x01fc,
 
 	[TXNLCR0]	= 0x0080,
 	[TXALCR0]	= 0x0084,
@@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+	SH_ETH_OFFSET_DEFAULTS,
+
 	[EDSR]		= 0x0000,
 	[EDMR]		= 0x0400,
 	[EDTRR]		= 0x0408,
@@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADSBSY]	= 0x0060,
 	[TSU_TEN]	= 0x0064,
 	[TSU_ADRH0]	= 0x0100,
-	[TSU_ADRL0]	= 0x0104,
-	[TSU_ADRH31]	= 0x01f8,
-	[TSU_ADRL31]	= 0x01fc,
 
 	[TXNLCR0]	= 0x0080,
 	[TXALCR0]	= 0x0084,
@@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+	SH_ETH_OFFSET_DEFAULTS,
+
 	[ECMR]		= 0x0300,
 	[RFLR]		= 0x0308,
 	[ECSR]		= 0x0310,
@@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+	SH_ETH_OFFSET_DEFAULTS,
+
 	[ECMR]		= 0x0100,
 	[RFLR]		= 0x0108,
 	[ECSR]		= 0x0110,
@@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+	SH_ETH_OFFSET_DEFAULTS,
+
 	[EDMR]		= 0x0000,
 	[EDTRR]		= 0x0004,
 	[EDRRR]		= 0x0008,
@@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[FWALCR1]	= 0x00b4,
 
 	[TSU_ADRH0]	= 0x0100,
-	[TSU_ADRL0]	= 0x0104,
-	[TSU_ADRL31]	= 0x01fc,
 };
 
 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
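The SH_ETH_OFFSET_DEFAULTS macro uses GCC's range syntax for designated initializers, so every slot of each per-SoC offset table starts out as SH_ETH_OFFSET_INVALID and only the registers listed afterwards override that default. A minimal, stand-alone sketch of the same technique, with an invented array name and sentinel value (not taken from the driver):

#include <stdio.h>

#define NUM_REGS	8
#define OFFSET_INVALID	0xffff	/* hypothetical sentinel, standing in for SH_ETH_OFFSET_INVALID */

/* Later designators override the range default, exactly as the sh_eth
 * tables override SH_ETH_OFFSET_DEFAULTS entry by entry.
 */
static const unsigned short demo_offsets[NUM_REGS] = {
	[0 ... NUM_REGS - 1] = OFFSET_INVALID,
	[2] = 0x0100,
	[5] = 0x0104,
};

int main(void)
{
	for (int i = 0; i < NUM_REGS; i++)
		printf("reg %d -> 0x%04x\n", i, demo_offsets[i]);
	return 0;
}

With the defaults in place, a register that a particular chip does not implement can be recognised by comparing its table entry against the sentinel rather than against 0 — which matters because 0x0000 is a legitimate offset (EDSR in the gigabit map), and is why the RDFAR check later in this diff stops testing for 0.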
@@ -588,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = {
 	.no_ade		= 1,
 	.rpadir		= 1,
 	.rpadir_value   = 2 << 16,
+	.rtrate		= 1,
 };
 
 #define SH_GIGA_ETH_BASE 0xfee00000UL
@@ -1411,6 +1417,9 @@ static int sh_eth_txfree(struct net_device *ndev)
 			break;
 		/* TACT bit must be checked before all the following reads */
 		rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, edmac_to_cpu(mdp, txdesc->status));
 		/* Free the original skb. */
 		if (mdp->tx_skbuff[entry]) {
 			dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1456,6 +1465,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		if (--boguscnt < 0)
 			break;
 
+		netif_info(mdp, rx_status, ndev,
+			   "rx entry %d status 0x%08x len %d\n",
+			   entry, desc_status, pkt_len);
+
 		if (!(desc_status & RDFEND))
 			ndev->stats.rx_length_errors++;
 
@@ -1500,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			netif_receive_skb(skb);
 			ndev->stats.rx_packets++;
 			ndev->stats.rx_bytes += pkt_len;
+			if (desc_status & RD_RFS8)
+				ndev->stats.multicast++;
 		}
 		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
@@ -1542,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
 		/* fix the values for the next receiving if RDE is set */
-		if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
+		if (intr_status & EESR_RDE &&
+		    mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
 			u32 count = (sh_eth_read(ndev, RDFAR) -
 				     sh_eth_read(ndev, RDLAR)) >> 4;
 
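The netif_info() calls added above are gated on the driver's message level (mdp->msg_enable): the tx_done and rx_status arguments select the NETIF_MSG_TX_DONE and NETIF_MSG_RX_STATUS bits, which a user can toggle at runtime through the get_msglevel/set_msglevel ethtool hooks visible further down in this diff. A stand-alone sketch of that gating idea, with an invented macro name (the real one is netif_info() from <linux/netdevice.h>, which takes the lowercase type name rather than the bit) and the standard NETIF_MSG_* bit values:

#include <stdio.h>

#define NETIF_MSG_TX_DONE	0x0400
#define NETIF_MSG_RX_STATUS	0x0800

struct fake_priv { unsigned int msg_enable; };

/* Hypothetical stand-in for netif_info(): print only if the caller's
 * message-level mask has the requested bit set.
 */
#define demo_netif_info(priv, bit, fmt, ...)			\
	do {							\
		if ((priv)->msg_enable & (bit))			\
			printf(fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct fake_priv p = { .msg_enable = NETIF_MSG_RX_STATUS };

	demo_netif_info(&p, NETIF_MSG_RX_STATUS, "rx entry %d\n", 5);
	demo_netif_info(&p, NETIF_MSG_TX_DONE, "tx entry %d\n", 7); /* suppressed */
	return 0;
}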
@@ -1929,6 +1945,192 @@ error_exit:
 	return ret;
 }
 
+/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
+ * version must be bumped as well.  Just adding registers up to that
+ * limit is fine, as long as the existing register indices don't
+ * change.
+ */
+#define SH_ETH_REG_DUMP_VERSION		1
+#define SH_ETH_REG_DUMP_MAX_REGS	256
+
+static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_cpu_data *cd = mdp->cd;
+	u32 *valid_map;
+	size_t len;
+
+	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
+
+	/* Dump starts with a bitmap that tells ethtool which
+	 * registers are defined for this chip.
+	 */
+	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
+	if (buf) {
+		valid_map = buf;
+		buf += len;
+	} else {
+		valid_map = NULL;
+	}
+
+	/* Add a register to the dump, if it has a defined offset.
+	 * This automatically skips most undefined registers, but for
+	 * some it is also necessary to check a capability flag in
+	 * struct sh_eth_cpu_data.
+	 */
+#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
+#define add_reg_from(reg, read_expr) do {				\
+		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
+			if (buf) {					\
+				mark_reg_valid(reg);			\
+				*buf++ = read_expr;			\
+			}						\
+			++len;						\
+		}							\
+	} while (0)
+#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
+#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
+
+	add_reg(EDSR);
+	add_reg(EDMR);
+	add_reg(EDTRR);
+	add_reg(EDRRR);
+	add_reg(EESR);
+	add_reg(EESIPR);
+	add_reg(TDLAR);
+	add_reg(TDFAR);
+	add_reg(TDFXR);
+	add_reg(TDFFR);
+	add_reg(RDLAR);
+	add_reg(RDFAR);
+	add_reg(RDFXR);
+	add_reg(RDFFR);
+	add_reg(TRSCER);
+	add_reg(RMFCR);
+	add_reg(TFTR);
+	add_reg(FDR);
+	add_reg(RMCR);
+	add_reg(TFUCR);
+	add_reg(RFOCR);
+	if (cd->rmiimode)
+		add_reg(RMIIMODE);
+	add_reg(FCFTR);
+	if (cd->rpadir)
+		add_reg(RPADIR);
+	if (!cd->no_trimd)
+		add_reg(TRIMD);
+	add_reg(ECMR);
+	add_reg(ECSR);
+	add_reg(ECSIPR);
+	add_reg(PIR);
+	if (!cd->no_psr)
+		add_reg(PSR);
+	add_reg(RDMLR);
+	add_reg(RFLR);
+	add_reg(IPGR);
+	if (cd->apr)
+		add_reg(APR);
+	if (cd->mpr)
+		add_reg(MPR);
+	add_reg(RFCR);
+	add_reg(RFCF);
+	if (cd->tpauser)
+		add_reg(TPAUSER);
+	add_reg(TPAUSECR);
+	add_reg(GECMR);
+	if (cd->bculr)
+		add_reg(BCULR);
+	add_reg(MAHR);
+	add_reg(MALR);
+	add_reg(TROCR);
+	add_reg(CDCR);
+	add_reg(LCCR);
+	add_reg(CNDCR);
+	add_reg(CEFCR);
+	add_reg(FRECR);
+	add_reg(TSFRCR);
+	add_reg(TLFRCR);
+	add_reg(CERCR);
+	add_reg(CEECR);
+	add_reg(MAFCR);
+	if (cd->rtrate)
+		add_reg(RTRATE);
+	if (cd->hw_crc)
+		add_reg(CSMR);
+	if (cd->select_mii)
+		add_reg(RMII_MII);
+	add_reg(ARSTR);
+	if (cd->tsu) {
+		add_tsu_reg(TSU_CTRST);
+		add_tsu_reg(TSU_FWEN0);
+		add_tsu_reg(TSU_FWEN1);
+		add_tsu_reg(TSU_FCM);
+		add_tsu_reg(TSU_BSYSL0);
+		add_tsu_reg(TSU_BSYSL1);
+		add_tsu_reg(TSU_PRISL0);
+		add_tsu_reg(TSU_PRISL1);
+		add_tsu_reg(TSU_FWSL0);
+		add_tsu_reg(TSU_FWSL1);
+		add_tsu_reg(TSU_FWSLC);
+		add_tsu_reg(TSU_QTAG0);
+		add_tsu_reg(TSU_QTAG1);
+		add_tsu_reg(TSU_QTAGM0);
+		add_tsu_reg(TSU_QTAGM1);
+		add_tsu_reg(TSU_FWSR);
+		add_tsu_reg(TSU_FWINMK);
+		add_tsu_reg(TSU_ADQT0);
+		add_tsu_reg(TSU_ADQT1);
+		add_tsu_reg(TSU_VTAG0);
+		add_tsu_reg(TSU_VTAG1);
+		add_tsu_reg(TSU_ADSBSY);
+		add_tsu_reg(TSU_TEN);
+		add_tsu_reg(TSU_POST1);
+		add_tsu_reg(TSU_POST2);
+		add_tsu_reg(TSU_POST3);
+		add_tsu_reg(TSU_POST4);
+		if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
+			/* This is the start of a table, not just a single
+			 * register.
+			 */
+			if (buf) {
+				unsigned int i;
+
+				mark_reg_valid(TSU_ADRH0);
+				for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
+					*buf++ = ioread32(
+						mdp->tsu_addr +
+						mdp->reg_offset[TSU_ADRH0] +
+						i * 4);
+			}
+			len += SH_ETH_TSU_CAM_ENTRIES * 2;
+		}
+	}
+
+#undef mark_reg_valid
+#undef add_reg_from
+#undef add_reg
+#undef add_tsu_reg
+
+	return len * 4;
+}
+
+static int sh_eth_get_regs_len(struct net_device *ndev)
+{
+	return __sh_eth_get_regs(ndev, NULL);
+}
+
+static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+			    void *buf)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	regs->version = SH_ETH_REG_DUMP_VERSION;
+
+	pm_runtime_get_sync(&mdp->pdev->dev);
+	__sh_eth_get_regs(ndev, buf);
+	pm_runtime_put_sync(&mdp->pdev->dev);
+}
+
 static int sh_eth_nway_reset(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
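The dump produced by __sh_eth_get_regs() is a small self-describing format: the first SH_ETH_REG_DUMP_MAX_REGS / 32 words are a validity bitmap, followed by one 32-bit value per register whose bit is set, in register-index order (the TSU_ADRH0 slot is special in that it is followed by the whole CAM table). A rough consumer-side sketch of walking such a buffer; the function name, constant and fake data are made up for illustration, only the layout comes from the driver code above:

#include <stdint.h>
#include <stdio.h>

#define DUMP_MAX_REGS	256	/* mirrors SH_ETH_REG_DUMP_MAX_REGS */

/* Walk a version-1 sh_eth register dump: 8 bitmap words, then the
 * values of every register whose bit is set, in index order.  (The
 * TSU_ADRH0 entry is followed by the CAM table words, which a real
 * consumer would have to account for; omitted here.)
 */
static void walk_dump(const uint32_t *dump, size_t n_words)
{
	const uint32_t *bitmap = dump;
	const uint32_t *val = dump + DUMP_MAX_REGS / 32;

	for (unsigned int reg = 0; reg < DUMP_MAX_REGS; reg++) {
		if (!(bitmap[reg / 32] & (1U << (reg % 32))))
			continue;
		if (val >= dump + n_words)
			break;
		printf("reg index %u = 0x%08x\n", reg, (unsigned int)*val++);
	}
}

int main(void)
{
	/* Fake dump: register indices 0 and 2 present, with dummy values. */
	uint32_t dump[8 + 2] = { 0 };

	dump[0] = (1U << 0) | (1U << 2);
	dump[8] = 0xdeadbeef;	/* value of register index 0 */
	dump[9] = 0x00000042;	/* value of register index 2 */
	walk_dump(dump, 10);
	return 0;
}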
@@ -2074,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_settings	= sh_eth_get_settings,
 	.set_settings	= sh_eth_set_settings,
+	.get_regs_len	= sh_eth_get_regs_len,
+	.get_regs	= sh_eth_get_regs,
 	.nway_reset	= sh_eth_nway_reset,
 	.get_msglevel	= sh_eth_get_msglevel,
 	.set_msglevel	= sh_eth_set_msglevel,
@@ -2213,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+/* The statistics registers have write-clear behaviour, which means we
+ * will lose any increment between the read and write.  We mitigate
+ * this by only clearing when we read a non-zero value, so we will
+ * never falsely report a total of zero.
+ */
+static void
+sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
+{
+	u32 delta = sh_eth_read(ndev, reg);
+
+	if (delta) {
+		*stat += delta;
+		sh_eth_write(ndev, 0, reg);
+	}
+}
+
 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2223,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 	if (!mdp->is_opened)
 		return &ndev->stats;
 
-	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
-	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
-	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
+	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
+	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
+	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
 
 	if (sh_eth_is_gether(mdp)) {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
+		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+				   CERCR);
+		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+				   CEECR);
 	} else {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
+		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+				   CNDCR);
 	}
 
 	return &ndev->stats;
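The sh_eth_update_stat() helper codifies the write-to-clear handling that the old open-coded read/write pairs got subtly wrong: when a counter reads back as zero there is nothing to fold in, so skipping the clearing write means an event that lands between the read and the write can no longer be wiped out while the running total is still zero. A compressed, stand-alone sketch of the same pattern against a simulated write-to-clear counter; the names and the fake register are invented for the example, the driver itself uses sh_eth_read()/sh_eth_write() on TROCR, CDCR, LCCR and friends:

#include <stdint.h>
#include <stdio.h>

/* A fake write-to-clear hardware counter, standing in for registers
 * such as TROCR/CDCR/LCCR; everything here is illustrative only.
 */
static uint32_t fake_reg;

static uint32_t counter_read(void)         { return fake_reg; }
static void     counter_write(uint32_t v)  { fake_reg = v; }

static unsigned long total;

/* Same shape as sh_eth_update_stat(): fold the hardware count into the
 * running total, but only issue the clearing write when the read value
 * was non-zero, so an event arriving just after a zero read is not
 * discarded by an unconditional "write 0".
 */
static void fold_counter(void)
{
	uint32_t delta = counter_read();

	if (delta) {
		total += delta;
		counter_write(0);
	}
}

int main(void)
{
	fake_reg = 3;		/* pretend the MAC dropped three frames */
	fold_counter();
	fold_counter();		/* second poll sees zero, writes nothing */
	printf("total = %lu\n", total);
	return 0;
}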