@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
 	struct hstate *h;
 
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
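
The following is a standalone userspace sketch, not part of the patch above, of the same scan-step idea: record the smallest hugepage order once during initialization, then walk a PFN range in steps of (1 << minimum_order) pages instead of recomputing the minimum on every call. The names init_hstates() and scan_range() are illustrative stand-ins for the kernel's hugetlb_init_hstates() and dissolve_free_huge_pages(), and the orders 9 and 18 are assumed example values (2MB and 1GB hugepages with a 4KB base page).

/* Illustrative sketch only; compiles as plain C outside the kernel. */
#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

static unsigned int minimum_order = UINT_MAX;

/* Stand-in for for_each_hstate()/huge_page_order(): example orders only. */
static const unsigned int example_orders[] = { 9, 18 };	/* 2MB, 1GB */

/* Stand-in for hugetlb_init_hstates(): remember the smallest order once. */
static void init_hstates(void)
{
	for (size_t i = 0; i < sizeof(example_orders) / sizeof(example_orders[0]); i++)
		if (minimum_order > example_orders[i])
			minimum_order = example_orders[i];
	assert(minimum_order != UINT_MAX);	/* mirrors the VM_BUG_ON() in the patch */
}

/* Stand-in for dissolve_free_huge_pages(): step by the minimum hugepage size. */
static void scan_range(unsigned long start_pfn, unsigned long end_pfn)
{
	assert((start_pfn & ((1UL << minimum_order) - 1)) == 0);	/* start must be aligned */
	for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn += 1UL << minimum_order)
		printf("would dissolve free hugepage at pfn %lu\n", pfn);
}

int main(void)
{
	init_hstates();
	scan_range(0, 4096);	/* 4096 pfns / 512-page step = 8 iterations */
	return 0;
}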