@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
 	unsigned long flags;
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-	u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
 	pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
 	pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
 	pin_reg |= BIT(INTERRUPT_MASK_OFF);
 	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-	/*
-	 * When debounce logic is enabled it takes ~900 us before interrupts
-	 * can be enabled. During this "debounce warm up" period the
-	 * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-	 * reads back as 1, signaling that interrupts are now enabled.
-	 */
-	while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-		continue;
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	int ret = 0;
-	u32 pin_reg;
+	u32 pin_reg, pin_reg_irq_en, mask;
 	unsigned long flags, irq_flags;
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	}
 
 	pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+	/*
+	 * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+	 * debounce registers of any GPIO will block wake/interrupt status
+	 * generation for *all* GPIOs for a length of time that depends on
+	 * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
+	 * INTERRUPT_ENABLE bit will read as 0.
+	 *
+	 * We temporarily enable irq for the GPIO whose configuration is
+	 * changing, and then wait for it to read back as 1 to know when
+	 * debounce has settled and then disable the irq again.
+	 * We do this polling with the spinlock held to ensure other GPIO
+	 * access routines do not read an incorrect value for the irq enable
+	 * bit of other GPIOs. We keep the GPIO masked while polling to avoid
+	 * spurious irqs, and disable the irq again after polling.
+	 */
+	mask = BIT(INTERRUPT_ENABLE_OFF);
+	pin_reg_irq_en = pin_reg;
+	pin_reg_irq_en |= mask;
+	pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+	writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+	while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+		continue;
 	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
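For reference, the sequence added by the last hunk reduces to a simple pattern: build a temporary pin-register value with INTERRUPT_ENABLE set and INTERRUPT_MASK cleared, write it, spin until the enable bit reads back as set, then write the configuration the caller actually requested. Below is a minimal standalone C sketch of that pattern. It is illustrative only: the readl/writel stubs, the bit offsets, and the simulated register variable are assumptions made so the sketch compiles and runs outside the kernel; they are not the driver's real environment, where the poll runs under gpio_dev->lock against real MMIO.

/*
 * Standalone sketch of the "write, poll until the enable bit latches,
 * restore" sequence. The register is simulated with a plain variable;
 * the stubs and bit offsets are illustrative assumptions, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define INTERRUPT_ENABLE_OFF 11	/* assumed offsets for the sketch */
#define INTERRUPT_MASK_OFF   12
#define BIT(n) (1u << (n))

static uint32_t fake_pin_reg;	/* stands in for the MMIO pin register */

static uint32_t readl_stub(void)	 { return fake_pin_reg; }
static void	writel_stub(uint32_t val) { fake_pin_reg = val; }

static void set_type_poll(uint32_t pin_reg)
{
	uint32_t mask = BIT(INTERRUPT_ENABLE_OFF);
	uint32_t pin_reg_irq_en = pin_reg;

	/* Temporarily enable and unmask the irq for this pin only. */
	pin_reg_irq_en |= mask;
	pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
	writel_stub(pin_reg_irq_en);

	/*
	 * On real hardware INTERRUPT_ENABLE reads as 0 for the whole
	 * MaskStsLength window; in this simulation it latches at once,
	 * so the loop exits on the first read.
	 */
	while ((readl_stub() & mask) != mask)
		continue;

	/* Restore the configuration the caller actually asked for. */
	writel_stub(pin_reg);
}

int main(void)
{
	set_type_poll(BIT(INTERRUPT_MASK_OFF));	/* masked, irq disabled */
	printf("final pin_reg: %#x\n", fake_pin_reg);
	return 0;
}

Note the design point the sketch makes visible: the temporary value is derived from the final pin_reg, so the only bits that differ during the poll are the enable/mask pair, and the final writel puts the register into exactly the state the caller configured.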