@@ -25,7 +25,9 @@
 #define EMAC_SGMII_PHY_SPEED_CFG1		0x0074
 #define EMAC_SGMII_PHY_IRQ_CMD			0x00ac
 #define EMAC_SGMII_PHY_INTERRUPT_CLEAR		0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK		0x00b4
 #define EMAC_SGMII_PHY_INTERRUPT_STATUS		0x00b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS		0x00d4
 
 #define FORCE_AN_TX_CFG				BIT(5)
 #define FORCE_AN_RX_CFG				BIT(4)
@@ -36,6 +38,8 @@
 #define SPDMODE_100				BIT(0)
 #define SPDMODE_10				0
 
+#define CDR_ALIGN_DET				BIT(6)
+
 #define IRQ_GLOBAL_CLEAR			BIT(0)
 
 #define DECODE_CODE_ERR				BIT(7)
@@ -44,6 +48,7 @@
 #define SGMII_PHY_IRQ_CLR_WAIT_TIME	10
 
 #define SGMII_PHY_INTERRUPT_ERR		(DECODE_CODE_ERR | DECODE_DISP_ERR)
+#define SGMII_ISR_MASK			(SGMII_PHY_INTERRUPT_ERR)
 
 #define SERDES_START_WAIT_TIMES		100
 
@@ -96,6 +101,51 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
 	return 0;
 }
 
+/* The number of decode errors that triggers a reset */
+#define DECODE_ERROR_LIMIT	2
+
+static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
+{
+	struct emac_adapter *adpt = data;
+	struct emac_sgmii *phy = &adpt->phy;
+	u32 status;
+
+	status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
+	status &= SGMII_ISR_MASK;
+	if (!status)
+		return IRQ_HANDLED;
+
+	/* If we get a decoding error and CDR is not locked, then try
+	 * resetting the internal PHY.  The internal PHY uses an embedded
+	 * clock with Clock and Data Recovery (CDR) to recover the
+	 * clock and data.
+	 */
+	if (status & SGMII_PHY_INTERRUPT_ERR) {
+		int count;
+
+		/* The SGMII is capable of recovering from some decode
+		 * errors automatically.  However, if we get multiple
+		 * decode errors in a row, then assume that something
+		 * is wrong and reset the interface.
+		 */
+		count = atomic_inc_return(&phy->decode_error_count);
+		if (count == DECODE_ERROR_LIMIT) {
+			schedule_work(&adpt->work_thread);
+			atomic_set(&phy->decode_error_count, 0);
+		}
+	} else {
+		/* We only care about consecutive decode errors. */
+		atomic_set(&phy->decode_error_count, 0);
+	}
+
+	if (emac_sgmii_irq_clear(adpt, status)) {
+		netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n");
+		schedule_work(&adpt->work_thread);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
 {
 	struct emac_sgmii *phy = &adpt->phy;
@@ -129,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
 			   ret);
 }
 
+static int emac_sgmii_open(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+	int ret;
+
+	if (sgmii->irq) {
+		/* Make sure interrupts are cleared and disabled first */
+		ret = emac_sgmii_irq_clear(adpt, 0xff);
+		if (ret)
+			return ret;
+		writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+		ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0,
+				  "emac-sgmii", adpt);
+		if (ret) {
+			netdev_err(adpt->netdev,
+				   "could not register handler for internal PHY\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int emac_sgmii_close(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+
+	/* Make sure interrupts are disabled */
+	writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+	free_irq(sgmii->irq, adpt);
+
+	return 0;
+}
+
+/* The error interrupts are only valid after the link is up */
+static int emac_sgmii_link_up(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+	int ret;
+
+	/* Clear and enable interrupts */
+	ret = emac_sgmii_irq_clear(adpt, 0xff);
+	if (ret)
+		return ret;
+
+	writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+	return 0;
+}
+
+static int emac_sgmii_link_down(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+
+	/* Disable interrupts */
+	writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+	synchronize_irq(sgmii->irq);
+
+	return 0;
+}
+
 static int emac_sgmii_acpi_match(struct device *dev, void *data)
 {
 #ifdef CONFIG_ACPI
@@ -139,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
 		{}
 	};
 	const struct acpi_device_id *id = acpi_match_device(match_table, dev);
-	emac_sgmii_initialize *initialize = data;
+	emac_sgmii_function *initialize = data;
 
 	if (id) {
 		acpi_handle handle = ACPI_HANDLE(dev);
@@ -226,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
 			goto error_put_device;
 		}
 
-		phy->initialize = (emac_sgmii_initialize)match->data;
+		phy->initialize = (emac_sgmii_function)match->data;
 	}
 
+	phy->open = emac_sgmii_open;
+	phy->close = emac_sgmii_close;
+	phy->link_up = emac_sgmii_link_up;
+	phy->link_down = emac_sgmii_link_down;
+
 	/* Base address is the first address */
 	res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -256,9 +373,12 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
 	if (ret)
 		goto error;
 
-	emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
 	emac_sgmii_link_init(adpt);
 
+	ret = platform_get_irq(sgmii_pdev, 0);
+	if (ret > 0)
+		phy->irq = ret;
+
 	/* We've remapped the addresses, so we don't need the device any
 	 * more.  of_find_device_by_node() says we should release it.
 	 */