@@ -298,6 +298,7 @@ static const struct mmu_notifier_ops intel_mmuops = {
 };
 
 static DEFINE_MUTEX(pasid_mutex);
+static LIST_HEAD(global_svm_list);
 
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
@@ -329,13 +330,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 
 	mutex_lock(&pasid_mutex);
 	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
-		int i;
+		struct intel_svm *t;
 
-		idr_for_each_entry(&iommu->pasid_idr, svm, i) {
-			if (svm->mm != mm ||
-			    (svm->flags & SVM_FLAG_PRIVATE_PASID))
+		list_for_each_entry(t, &global_svm_list, list) {
+			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
 				continue;
 
+			svm = t;
 			if (svm->pasid >= pasid_max) {
 				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
@@ -404,6 +405,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		svm->mm = mm;
 		svm->flags = flags;
 		INIT_LIST_HEAD_RCU(&svm->devs);
+		INIT_LIST_HEAD(&svm->list);
 		ret = -ENOMEM;
 		if (mm) {
 			ret = mmu_notifier_register(&svm->notifier, mm);
@@ -430,6 +432,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		 */
 		if (cap_caching_mode(iommu->cap))
 			intel_flush_pasid_dev(svm, sdev, svm->pasid);
+
+		list_add_tail(&svm->list, &global_svm_list);
 	}
 	list_add_rcu(&sdev->list, &svm->devs);
 
@@ -485,6 +489,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 					if (svm->mm)
 						mmu_notifier_unregister(&svm->notifier, svm->mm);
 
+					list_del(&svm->list);
+
 					/* We mandate that no page faults may be outstanding
 					 * for the PASID when intel_svm_unbind_mm() is called.
 					 * If that is not obeyed, subtle errors will happen.