@@ -43,7 +43,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
  *
  * @mm: mm struct this HMM struct is bound to
  * @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
  * @ranges: list of range being snapshotted
  * @mirrors: list of mirrors for this mm
  * @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -52,7 +51,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
 struct hmm {
 	struct mm_struct	*mm;
 	spinlock_t		lock;
-	atomic_t		sequence;
 	struct list_head	ranges;
 	struct list_head	mirrors;
 	struct mmu_notifier	mmu_notifier;
@@ -85,7 +83,6 @@ static struct hmm *hmm_register(struct mm_struct *mm)
 		return NULL;
 	INIT_LIST_HEAD(&hmm->mirrors);
 	init_rwsem(&hmm->mirrors_sem);
-	atomic_set(&hmm->sequence, 0);
 	hmm->mmu_notifier.ops = NULL;
 	INIT_LIST_HEAD(&hmm->ranges);
 	spin_lock_init(&hmm->lock);
@@ -126,7 +123,7 @@ void hmm_mm_destroy(struct mm_struct *mm)
 	kfree(mm->hmm);
 }
 
-static int hmm_invalidate_range(struct hmm *hmm,
+static int hmm_invalidate_range(struct hmm *hmm, bool device,
 				const struct hmm_update *update)
 {
 	struct hmm_mirror *mirror;
@@ -147,6 +144,9 @@ static int hmm_invalidate_range(struct hmm *hmm,
 	}
 	spin_unlock(&hmm->lock);
 
+	if (!device)
+		return 0;
+
 	down_read(&hmm->mirrors_sem);
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
 		int ret;
@@ -189,18 +189,21 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long start,
-				      unsigned long end,
-				      bool blockable)
+			struct mm_struct *mm,
+			unsigned long start,
+			unsigned long end,
+			bool blockable)
 {
+	struct hmm_update update;
 	struct hmm *hmm = mm->hmm;
 
 	VM_BUG_ON(!hmm);
 
-	atomic_inc(&hmm->sequence);
-
-	return 0;
+	update.start = start;
+	update.end = end;
+	update.event = HMM_UPDATE_INVALIDATE;
+	update.blockable = blockable;
+	return hmm_invalidate_range(hmm, true, &update);
 }
 
 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -217,7 +220,7 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 	update.end = end;
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = true;
-	hmm_invalidate_range(hmm, &update);
+	hmm_invalidate_range(hmm, false, &update);
 }
 
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
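
For reference, a sketch of how hmm_invalidate_range() reads once the hunks
above are applied, assembled only from the diff context. The ranges-walk body
and the callback error handling are elided because the hunks do not show
them; the sync_cpu_device_pagetables() call is the hmm_mirror_ops hook this
loop drives in HMM of this era, named here for illustration rather than
copied from the patch. Treat this as a sketch of the resulting control flow,
not the exact file contents:

static int hmm_invalidate_range(struct hmm *hmm, bool device,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;

	spin_lock(&hmm->lock);
	/* ... walk hmm->ranges and invalidate overlapping snapshots
	 * (body elided in the hunks above) ... */
	spin_unlock(&hmm->lock);

	/*
	 * range_end now passes device == false: the device page tables
	 * were already torn down at range_start (which passes true), so
	 * only the snapshot bookkeeping above is repeated here. This is
	 * what lets the sequence counter go away.
	 */
	if (!device)
		return 0;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		/* assumed callback name; see lead-in above */
		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		/* ... -EAGAIN/non-blockable handling elided ... */
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}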