@@ -2303,23 +2303,6 @@ out:
 	}
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-	/*
-	 * This deliberately does not clear the cpumask as it's expensive
-	 * and unnecessary. If there happens to be data in there then the
-	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-	 * then will be cleared.
-	 */
-	current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
  */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
-	init_tlb_ubc();
-
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 	       nr[LRU_INACTIVE_FILE]) {
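
For reference, the invariant that makes dropping init_tlb_ubc() safe: current->tlb_ubc.flush_required starts out false in a freshly zeroed task_struct, is set only when an unmapped page is queued for a deferred TLB flush, and is cleared again by the flush itself before the pages are freed. Below is a minimal, self-contained sketch of that lifecycle. The helper names mirror the mm/rmap.c helpers of this era (set_tlb_ubc_flush_pending(), try_to_unmap_flush()), but the bodies are simplified userspace stand-ins, not the kernel implementation.

/*
 * Standalone model (not kernel code) of the tlb_ubc bookkeeping that
 * the patch above relies on.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct tlbflush_unmap_batch {
	bool flush_required;	/* pages queued since the last flush? */
};

/* Stand-in for current->tlb_ubc. task_struct is zero-initialised on
 * fork, so flush_required starts out false with no explicit init. */
static struct tlbflush_unmap_batch tlb_ubc;

/* Models queueing an unmapped page for a deferred TLB flush. */
static void set_tlb_ubc_flush_pending(void)
{
	tlb_ubc.flush_required = true;
}

/* Models the flush that must happen before the batched pages are
 * freed; it also resets the bookkeeping for the next batch. */
static void try_to_unmap_flush(void)
{
	if (!tlb_ubc.flush_required)
		return;
	puts("flush TLBs on the batched cpumask");
	tlb_ubc.flush_required = false;
}

int main(void)
{
	/* A fresh task already satisfies the invariant. */
	assert(!tlb_ubc.flush_required);

	set_tlb_ubc_flush_pending();	/* reclaim unmaps some pages... */
	try_to_unmap_flush();		/* ...and flushes before freeing them */

	/* The flush restored the invariant, so the next reclaim pass can
	 * rely on flush_required == false without calling init_tlb_ubc(). */
	assert(!tlb_ubc.flush_required);
	return 0;
}

Because the flush path restores the state it consumed, re-initialising it at the top of every shrink_node_memcg() pass was redundant, which is why the hunks above can remove both the helper and its call site.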