@@ -2414,13 +2414,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	/* Clear CR0 and sync (disables SMMU and queue processing) */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
 	if (reg & CR0_SMMUEN) {
-		if (is_kdump_kernel()) {
-			arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
-			arm_smmu_device_disable(smmu);
-			return -EBUSY;
-		}
-
 		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+		WARN_ON(is_kdump_kernel() && !disable_bypass);
+		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
 	}

 	ret = arm_smmu_device_disable(smmu);
@@ -2513,6 +2509,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 		return ret;
 	}

+	if (is_kdump_kernel())
+		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+
 	/* Enable the SMMU interface, or ensure bypass */
 	if (!bypass || disable_bypass) {