@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
 #endif
 
         for (page = start_page; page <= end_page;) {
-                /* Make sure we are not inadvertently changing nodes */
-                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                 if (!pfn_valid_within(page_to_pfn(page))) {
                         page++;
                         continue;
                 }
 
+                /* Make sure we are not inadvertently changing nodes */
+                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
                 if (!PageBuddy(page)) {
                         page++;
                         continue;
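The reordering above matters because a pfn that falls into a memory hole has no properly initialized struct page behind it, so even the debug-only page_to_nid() read inside VM_BUG_ON_PAGE() must not run until pfn_valid_within() has confirmed the pfn is backed. A tiny user-space analogue of that validate-before-dereference pattern follows; every name in it is a hypothetical stand-in for illustration, not kernel code:

#include <stdbool.h>
#include <stddef.h>

struct elem { int node; };              /* stand-in for struct page */

/*
 * Count elements that live on a given node, skipping unbacked ("hole")
 * slots before any field is read -- the same ordering the hunk above
 * enforces for pfn_valid_within() vs. page_to_nid().
 */
static int count_on_node(const struct elem *tbl, const bool *backed,
                         size_t n, int node)
{
        int cnt = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (!backed[i])                 /* hole: skip without dereferencing */
                        continue;
                if (tbl[i].node == node)        /* safe to read the field now */
                        cnt++;
        }
        return cnt;
}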
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
-                                                               gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
 #ifdef CONFIG_NUMA
-        int local_nid = numa_node_id();
         enum zone_stat_item local_stat = NUMA_LOCAL;
 
-        if (unlikely(flags & __GFP_OTHER_NODE)) {
+        if (z->node != numa_node_id())
                 local_stat = NUMA_OTHER;
-                local_nid = preferred_zone->node;
-        }
 
-        if (z->node == local_nid) {
+        if (z->node == preferred_zone->node)
                 __inc_zone_state(z, NUMA_HIT);
-                __inc_zone_state(z, local_stat);
-        } else {
+        else {
                 __inc_zone_state(z, NUMA_MISS);
                 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
         }
+        __inc_zone_state(z, local_stat);
 #endif
 }
 
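The behavioural change in this hunk is easy to miss: NUMA_HIT/NUMA_MISS are now judged against the preferred zone's node, NUMA_LOCAL/NUMA_OTHER against the node of the allocating CPU, and the local/other counter is bumped on every allocation instead of only on hits. A small user-space model of that accounting, purely for illustration (the struct and functions are hypothetical; only the counter names mirror the kernel's):

#include <stdio.h>

struct numa_counters { int hit, miss, foreign, local, other; };

static void account(struct numa_counters *c, int preferred_nid,
                    int alloc_nid, int cpu_nid)
{
        /* NUMA_LOCAL vs NUMA_OTHER: did the page come from the CPU's node? */
        if (alloc_nid != cpu_nid)
                c->other++;
        else
                c->local++;

        /* NUMA_HIT vs NUMA_MISS/NUMA_FOREIGN: did the preferred node satisfy it? */
        if (alloc_nid == preferred_nid)
                c->hit++;
        else {
                c->miss++;
                c->foreign++;
        }
}

int main(void)
{
        struct numa_counters c = { 0 };

        /*
         * A task running on node 0, preferring node 1, gets a page from
         * node 1.  The old code (without __GFP_OTHER_NODE) counted this as
         * MISS + FOREIGN; the new code counts HIT + OTHER, since hit/miss
         * follows the preferred node and local/other follows the CPU.
         */
        account(&c, 1, 1, 0);
        printf("hit=%d miss=%d foreign=%d local=%d other=%d\n",
               c.hit, c.miss, c.foreign, c.local, c.other);
        return 0;
}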
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
         }
 
         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-        zone_statistics(preferred_zone, zone, gfp_flags);
+        zone_statistics(preferred_zone, zone);
         local_irq_restore(flags);
 
         VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3904,8 +3896,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-                                       gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+                                             gfp_t gfp_mask)
 {
         struct page *page = NULL;
         gfp_t gfp = gfp_mask;
@@ -3925,22 +3917,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
         return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-                       unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
         if (page_ref_sub_and_test(page, count)) {
+                unsigned int order = compound_order(page);
+
                 if (order == 0)
                         free_hot_cold_page(page, false);
                 else
                         __free_pages_ok(page, order);
         }
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *__alloc_page_frag(struct page_frag_cache *nc,
-                        unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+                      unsigned int fragsz, gfp_t gfp_mask)
 {
         unsigned int size = PAGE_SIZE;
         struct page *page;
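Note that __page_frag_cache_drain() now derives the order itself via compound_order(), and only once the last reference is gone, so callers just hand over the page and the number of references they still own. A minimal sketch of the calling convention, assuming a hypothetical driver-side cache (the struct and function names below are illustrative, not from the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical per-ring receive cache; field names are illustrative. */
struct my_rx_cache {
        struct page     *page;          /* one (possibly compound) backing page */
        unsigned int    pagecnt_bias;   /* references this cache still owns */
};

static void my_rx_cache_teardown(struct my_rx_cache *cache)
{
        if (!cache->page)
                return;

        /*
         * Drop all remaining references in one call; the page is freed as
         * order-0 or compound once the refcount actually reaches zero.
         */
        __page_frag_cache_drain(cache->page, cache->pagecnt_bias);
        cache->page = NULL;
}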
@@ -3948,7 +3941,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
 
         if (unlikely(!nc->va)) {
 refill:
-                page = __page_frag_refill(nc, gfp_mask);
+                page = __page_frag_cache_refill(nc, gfp_mask);
                 if (!page)
                         return NULL;
 
@@ -3991,19 +3984,19 @@ refill:
 
         return nc->va + offset;
 }
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
  */
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
 {
         struct page *page = virt_to_head_page(addr);
 
         if (unlikely(put_page_testzero(page)))
                 __free_pages_ok(page, compound_order(page));
 }
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
                 size_t size)
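Finally, the renamed allocator pair reads much more naturally at call sites. A minimal sketch of typical usage, assuming a caller that owns a struct page_frag_cache; the wrapper functions here are hypothetical, and only page_frag_alloc() and page_frag_free() come from the hunks above:

#include <linux/gfp.h>
#include <linux/mm_types.h>     /* struct page_frag_cache */

/* Hypothetical wrappers around the renamed fragment allocator. */
static void *my_alloc_frag(struct page_frag_cache *nc, unsigned int len)
{
        /* Carve a small fragment out of the cached (possibly compound) page. */
        return page_frag_alloc(nc, len, GFP_ATOMIC);
}

static void my_free_frag(void *data)
{
        /* Works whether the fragment came from an order-0 or a compound page. */
        page_frag_free(data);
}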