@@ -92,6 +92,44 @@ __setup("noexec32=", nonx32_setup);
 /*
  * When memory was added make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
+#ifdef CONFIG_X86_5LEVEL
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		const pgd_t *pgd_ref = pgd_offset_k(addr);
+		struct page *page;
+
+		/* Check for overflow */
+		if (addr < start)
+			break;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			spinlock_t *pgt_lock;
+
+			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+			/* the pgt_lock only for Xen */
+			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+			spin_lock(pgt_lock);
+
+			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+
+			spin_unlock(pgt_lock);
+		}
+		spin_unlock(&pgd_lock);
+	}
+}
+#else
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
@@ -135,6 +173,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		spin_unlock(&pgd_lock);
 	}
 }
+#endif
 
 /*
  * NOTE: This function is marked __ref because it calls __init function