@@ -21,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 			if (!is_swap_pte(*pvmw->pte))
 				return false;
 		} else {
-			if (!pte_present(*pvmw->pte))
+			/*
+			 * We get here when we are trying to unmap a private
+			 * device page from the process address space. Such
+			 * a page is not CPU accessible and thus is mapped as
+			 * a special swap entry; nonetheless it still does
+			 * count as a valid regular mapping for the page (and
+			 * is accounted as such in the page's map count).
+			 *
+			 * So handle this special case as if it were a normal
+			 * page mapping, i.e. lock the CPU page table and
+			 * return true.
+			 *
+			 * For more details on device private memory see HMM
+			 * (include/linux/hmm.h or mm/hmm.c).
+			 */
+			if (is_swap_pte(*pvmw->pte)) {
+				swp_entry_t entry;
+
+				/* Handle un-addressable ZONE_DEVICE memory */
+				entry = pte_to_swp_entry(*pvmw->pte);
+				if (!is_device_private_entry(entry))
+					return false;
+			} else if (!pte_present(*pvmw->pte))
 				return false;
 		}
 	}
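
To make the intent of the new check easier to read outside of diff context, here is a small standalone sketch of the rule the hunk implements. The helper name pte_holds_mapping() is hypothetical (it does not exist in the tree); the calls it makes (pte_present(), is_swap_pte(), pte_to_swp_entry(), is_device_private_entry()) are the real helpers used by the hunk above.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Hypothetical helper, for illustration only: true when a PTE should be
 * treated as a valid mapping by a page table walk, i.e. it is either a
 * present PTE or the special swap entry used for un-addressable device
 * private (ZONE_DEVICE) memory, as described in the comment above.
 */
static bool pte_holds_mapping(pte_t pte)
{
	if (pte_present(pte))
		return true;
	/* Device private pages are encoded as non-present swap entries. */
	if (is_swap_pte(pte))
		return is_device_private_entry(pte_to_swp_entry(pte));
	return false;
}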