@@ -178,7 +178,7 @@ __setup("noexec32=", nonx32_setup);
  * When memory was added/removed make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
-void sync_global_pgds(unsigned long start, unsigned long end)
+void sync_global_pgds(unsigned long start, unsigned long end, int removed)
 {
 	unsigned long address;
 
@@ -186,7 +186,12 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		struct page *page;
 
-		if (pgd_none(*pgd_ref))
+		/*
+		 * When called after memory hot-remove, pgd_none() returns
+		 * true for the reference entry. In that case (removed == 1),
+		 * we must clear the PGD entries in the local PGD level page.
+		 */
+		if (pgd_none(*pgd_ref) && !removed)
 			continue;
 
 		spin_lock(&pgd_lock);
@@ -199,12 +204,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
 
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
+			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
 				BUG_ON(pgd_page_vaddr(*pgd)
 				       != pgd_page_vaddr(*pgd_ref));
 
+			if (removed) {
+				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
+					pgd_clear(pgd);
+			} else {
+				if (pgd_none(*pgd))
+					set_pgd(pgd, *pgd_ref);
+			}
+
 			spin_unlock(pgt_lock);
 		}
 		spin_unlock(&pgd_lock);
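
The new "removed" flag makes sync_global_pgds() bidirectional: on hot-add
(removed == 0) a kernel PGD entry present in init_mm but missing from a
process's PGD is copied in with set_pgd(); on hot-remove (removed == 1) an
entry that has vanished from init_mm but still lingers in a process's PGD is
torn down with pgd_clear(). The BUG_ON() still enforces that two populated
entries reference the same PUD page. The userspace sketch below models that
decision table; the pgd_entry type, sync_one() helper, and zero-means-none
encoding are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stdio.h>

typedef unsigned long pgd_entry;	/* 0 plays the role of pgd_none() */

/* Toy model of the per-process branch in sync_global_pgds(). */
static void sync_one(pgd_entry ref, pgd_entry *local, int removed)
{
	/* Two populated entries must reference the same page table
	 * (mirrors the BUG_ON(pgd_page_vaddr(...)) check). */
	if (ref && *local)
		assert(ref == *local);

	if (removed) {
		/* init_mm lost the entry; drop the stale local copy. */
		if (!ref && *local)
			*local = 0;	/* stands in for pgd_clear() */
	} else {
		/* init_mm gained the entry; propagate it. */
		if (!*local)
			*local = ref;	/* stands in for set_pgd() */
	}
}

int main(void)
{
	pgd_entry local = 0xabc000;

	sync_one(0, &local, 1);			/* hot-remove path */
	printf("after remove: %#lx\n", local);	/* prints 0 */

	sync_one(0xdef000, &local, 0);		/* hot-add path */
	printf("after add:    %#lx\n", local);	/* prints 0xdef000 */
	return 0;
}
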
@@ -633,7 +644,7 @@ kernel_physical_mapping_init(unsigned long start,
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(addr, end - 1);
+		sync_global_pgds(addr, end - 1, 0);
 
 	__flush_tlb_all();
 
@@ -976,25 +987,26 @@ static void __meminit
 remove_pagetable(unsigned long start, unsigned long end, bool direct)
 {
 	unsigned long next;
+	unsigned long addr;
 	pgd_t *pgd;
 	pud_t *pud;
 	bool pgd_changed = false;
 
-	for (; start < end; start = next) {
-		next = pgd_addr_end(start, end);
+	for (addr = start; addr < end; addr = next) {
+		next = pgd_addr_end(addr, end);
 
-		pgd = pgd_offset_k(start);
+		pgd = pgd_offset_k(addr);
 		if (!pgd_present(*pgd))
 			continue;
 
 		pud = (pud_t *)pgd_page_vaddr(*pgd);
-		remove_pud_table(pud, start, next, direct);
+		remove_pud_table(pud, addr, next, direct);
 		if (free_pud_table(pud, pgd))
 			pgd_changed = true;
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(start, end - 1);
+		sync_global_pgds(start, end - 1, 1);
 
 	flush_tlb_all();
 }
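
The remove_pagetable() rework fixes a real bug, not just style: the old loop
advanced "start" itself, so by the time sync_global_pgds(start, end - 1) ran,
"start" had already been walked up to "end" and the sync covered an empty
range. Iterating over a local "addr" keeps "start" intact for the final call.
A minimal standalone sketch of the pattern follows; toy_sync() and the 4 KiB
step are illustrative assumptions, not the kernel functions.

#include <stdio.h>

static void toy_sync(unsigned long start, unsigned long end)
{
	printf("sync range [%#lx, %#lx)%s\n", start, end,
	       start >= end ? "  <- empty, a no-op" : "");
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x4000, next, addr;

	/* Old pattern: the loop consumes its own range start. */
	unsigned long s = start;
	for (; s < end; s = next)
		next = s + 0x1000;
	toy_sync(s, end);	/* s == end here: nothing gets synced */

	/* New pattern: iterate on a copy; "start" survives the loop. */
	for (addr = start; addr < end; addr = next)
		next = addr + 0x1000;
	toy_sync(start, end);	/* full range, as intended */
	return 0;
}
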
@@ -1341,7 +1353,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	else
 		err = vmemmap_populate_basepages(start, end, node);
 	if (!err)
-		sync_global_pgds(start, end - 1);
+		sync_global_pgds(start, end - 1, 0);
 	return err;
 }
 
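
Note how the call sites now encode the direction: kernel_physical_mapping_init()
and vmemmap_populate() pass 0 (hot-add semantics), while only remove_pagetable()
passes 1. Bare 0/1 flags are easy to misread at a call site; where that is a
concern, thin wrappers such as the hypothetical ones below (not part of this
patch) keep the intent visible:

/* Hypothetical readability wrappers -- illustrative only, not in the patch.
 * Assumes the updated three-argument prototype is in scope. */
void sync_global_pgds(unsigned long start, unsigned long end, int removed);

static inline void sync_global_pgds_add(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end, 0);	/* hot-add: propagate new entries */
}

static inline void sync_global_pgds_remove(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end, 1);	/* hot-remove: clear stale entries */
}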