@@ -1855,54 +1855,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	return newpage;
 }
 
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
-					unsigned long nr_pages)
-{
-	unsigned long next_window, interval;
-
-	next_window = READ_ONCE(pgdat->numabalancing_migrate_next_window);
-	interval = msecs_to_jiffies(migrate_interval_millisecs);
-
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (time_after(jiffies, next_window) &&
-			spin_trylock(&pgdat->numabalancing_migrate_lock)) {
-		pgdat->numabalancing_migrate_nr_pages = 0;
-		do {
-			next_window += interval;
-		} while (unlikely(time_after(jiffies, next_window)));
-
-		WRITE_ONCE(pgdat->numabalancing_migrate_next_window, next_window);
-		spin_unlock(&pgdat->numabalancing_migrate_lock);
-	}
-	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
-		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
-								nr_pages);
-		return true;
-	}
-
-	/*
-	 * This is an unlocked non-atomic update so errors are possible.
-	 * The consequences are failing to migrate when we potentiall should
-	 * have which is not severe enough to warrant locking. If it is ever
-	 * a problem, it can be converted to a per-cpu counter.
-	 */
-	pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	return false;
-}
-
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int page_lru;
@@ -1975,14 +1927,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	if (page_is_file_cache(page) && PageDirty(page))
 		goto out;
 
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (numamigrate_update_ratelimit(pgdat, 1))
-		goto out;
-
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated)
 		goto out;
@@ -2029,14 +1973,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	unsigned long mmun_start = address & HPAGE_PMD_MASK;
 	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
-		goto out_dropref;
-
 	new_page = alloc_pages_node(node,
 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
 		HPAGE_PMD_ORDER);
@@ -2133,7 +2069,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_fail:
 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
 	ptl = pmd_lock(mm, pmd);
 	if (pmd_same(*pmd, entry)) {
 		entry = pmd_modify(entry, vma->vm_page_prot);