@@ -26,6 +26,10 @@
 #include <linux/interrupt.h>
 #include <asm/page.h>
 
+#define PASID_ENTRY_P		BIT_ULL(0)	/* Present */
+#define PASID_ENTRY_FLPM_5LP	BIT_ULL(9)	/* First-level paging: 5-level */
+#define PASID_ENTRY_SRE		BIT_ULL(11)	/* Supervisor Request Enable */
+
 static irqreturn_t prq_event_thread(int irq, void *d);
 
 struct pasid_entry {
@@ -300,6 +304,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm = NULL;
 	struct mm_struct *mm = NULL;
+	u64 pasid_entry_val;
 	int pasid_max;
 	int ret;
 
@@ -406,9 +411,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 				kfree(sdev);
 				goto out;
 			}
-			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
+			pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
 		} else
-			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+			pasid_entry_val = (u64)__pa(init_mm.pgd) |
+					  PASID_ENTRY_P | PASID_ENTRY_SRE;
+		if (cpu_feature_enabled(X86_FEATURE_LA57))
+			pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
+
+		iommu->pasid_table[svm->pasid].val = pasid_entry_val;
+
 		wmb();
 		/* In caching mode, we still have to flush with PASID 0 when
 		 * a PASID table entry becomes present. Not entirely clear