@@ -36,6 +36,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 	do {
 		pte_t pte = ACCESS_ONCE(*ptep);
 		struct page *page;
+		/*
+		 * Similar to the PMD case, NUMA hinting must take slow path
+		 */
+		if (pte_numa(pte))
+			return 0;
 
 		if ((pte_val(pte) & mask) != result)
 			return 0;
@@ -75,6 +80,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (pmd_huge(pmd) || pmd_large(pmd)) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_numa(pmd))
+				return 0;
+
 			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
 					 write, pages, nr))
 				return 0;
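
For context, returning 0 from these fast-path helpers is what forces the fallback: the fast-GUP caller sees that fewer pages were pinned than requested and retries the remainder through the ordinary get_user_pages() slow path, where NUMA hinting faults can actually be taken, accounted, and serialised against THP migration. Below is a simplified sketch of that caller pattern, modelled on the generic get_user_pages_fast() structure of 3.x-era kernels (mmap_sem, the old get_user_pages() signature); the body is illustrative and is not part of this patch.

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;

	/*
	 * Lockless fast path. With this patch, gup_pte_range() and
	 * gup_pmd_range() return 0 on pte_numa()/pmd_numa() entries,
	 * so nr stops short of nr_pages whenever a NUMA hinting fault
	 * is pending.
	 */
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Fall back: retry the remaining pages via the slow path. */
	start += nr << PAGE_SHIFT;
	pages += nr;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages - nr,
			     write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	/* On slow-path failure, report any pages already pinned. */
	if (ret < 0)
		return nr ? nr : ret;
	return nr + ret;
}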