@@ -5489,19 +5489,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	u32 intr_status, enabled_intr_status;
 	irqreturn_t retval = IRQ_NONE;
 	struct ufs_hba *hba = __hba;
+	int retries = hba->nutrs;
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-	enabled_intr_status =
-		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-	if (intr_status)
-		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+	/*
+	 * There could be max of hba->nutrs reqs in flight and in worst case
+	 * if the reqs get finished 1 by 1 after the interrupt status is
+	 * read, make sure we handle them by checking the interrupt status
+	 * again in a loop until we process all of the reqs before returning.
+	 */
+	do {
+		enabled_intr_status =
+			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+		if (intr_status)
+			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+		if (enabled_intr_status) {
+			ufshcd_sl_intr(hba, enabled_intr_status);
+			retval = IRQ_HANDLED;
+		}
+
+		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+	} while (intr_status && --retries);
 
-	if (enabled_intr_status) {
-		ufshcd_sl_intr(hba, enabled_intr_status);
-		retval = IRQ_HANDLED;
-	}
 	spin_unlock(hba->host->host_lock);
 	return retval;
 }