@@ -1898,12 +1898,9 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	struct mempolicy *pol;
 	struct page *page;
 	int preferred_nid;
-	unsigned int cpuset_mems_cookie;
 	nodemask_t *nmask;
 
-retry_cpuset:
 	pol = get_vma_policy(vma, addr);
-	cpuset_mems_cookie = read_mems_allowed_begin();
 
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
@@ -1945,8 +1942,6 @@ retry_cpuset:
 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
-	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
-		goto retry_cpuset;
 	return page;
 }
 
@@ -1964,23 +1959,15 @@ out:
  *	Allocate a page from the kernel page pool. When not in
  *	interrupt context and apply the current process NUMA policy.
  *	Returns NULL when no page can be allocated.
- *
- *	Don't call cpuset_update_task_memory_state() unless
- *	1) it's ok to take cpuset_sem (can WAIT), and
- *	2) allocating for current task (not interrupt).
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
-	unsigned int cpuset_mems_cookie;
 
 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
 		pol = get_task_policy(current);
 
-retry_cpuset:
-	cpuset_mems_cookie = read_mems_allowed_begin();
-
 	/*
 	 * No reference counting needed for current->mempolicy
 	 * nor system default_policy
@@ -1992,9 +1979,6 @@ retry_cpuset:
 					policy_node(gfp, pol, numa_node_id()),
 					policy_nodemask(gfp, pol));
 
-	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
-		goto retry_cpuset;
-
 	return page;
 }
 EXPORT_SYMBOL(alloc_pages_current);
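
Note: the read_mems_allowed_begin()/read_mems_allowed_retry() pair deleted above is a seqcount-style snapshot. The caller records the cpuset's mems_allowed sequence count before allocating; if the allocation fails and the count has since changed (the allowed nodemask was rewritten concurrently), the failure may be spurious, so the caller retries instead of returning NULL. This patch removes the per-caller retry loops from both alloc_pages_vma() and alloc_pages_current(); the core allocator's slowpath takes its own cookie, so the race is handled internally and the outer loops are redundant. The sketch below is a minimal userspace analogue of the retry pattern, not the kernel implementation: all names (mems_seq, read_mems_begin(), read_mems_retry(), try_alloc()) are illustrative, and the odd/even "write in progress" handling of a real seqcount is omitted.

/* Build with a C11 compiler, e.g. gcc -std=c11 retry.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

static _Atomic unsigned int mems_seq;	/* bumped by a hypothetical writer */

/* Snapshot the sequence count before the allocation attempt. */
static unsigned int read_mems_begin(void)
{
	return atomic_load_explicit(&mems_seq, memory_order_acquire);
}

/* A changed count means the mask was rewritten while we were allocating. */
static bool read_mems_retry(unsigned int cookie)
{
	return atomic_load_explicit(&mems_seq, memory_order_acquire) != cookie;
}

/* Stand-in for an allocation that can fail against a stale nodemask. */
static void *try_alloc(void)
{
	return malloc(64);
}

int main(void)
{
	unsigned int cookie;
	void *page;

retry:
	cookie = read_mems_begin();
	page = try_alloc();
	/* Only a failure that raced with a mask update is retried. */
	if (!page && read_mems_retry(cookie))
		goto retry;

	free(page);
	return page ? 0 : 1;
}

The shape matches the removed kernel code: an unconditional snapshot before the attempt, and a retry only on the failure-plus-stale-cookie combination, so successful allocations never pay for the check.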