@@ -3251,25 +3251,37 @@ static void show_migration_types(unsigned char type)
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
- * Suppresses nodes that are not allowed by current's cpuset if
- * SHOW_MEM_FILTER_NODES is passed.
+ *
+ * Bits in @filter:
+ * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
+ *   cpuset.
+ * SHOW_MEM_PERCPU_LISTS: display full per-node per-cpu pcp lists
  */
 void show_free_areas(unsigned int filter)
 {
+	unsigned long free_pcp = 0;
 	int cpu;
 	struct zone *zone;
 
 	for_each_populated_zone(zone) {
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
-		show_node(zone);
-		printk("%s per-cpu:\n", zone->name);
+
+		if (filter & SHOW_MEM_PERCPU_LISTS) {
+			show_node(zone);
+			printk("%s per-cpu:\n", zone->name);
+		}
 
 		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;
 
 			pageset = per_cpu_ptr(zone->pageset, cpu);
 
+			free_pcp += pageset->pcp.count;
+
+			if (!(filter & SHOW_MEM_PERCPU_LISTS))
+				continue;
+
 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
 			       cpu, pageset->pcp.high,
 			       pageset->pcp.batch, pageset->pcp.count);
@@ -3278,11 +3290,10 @@ void show_free_areas(unsigned int filter)
 
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
-		" unevictable:%lu"
-		" dirty:%lu writeback:%lu unstable:%lu\n"
-		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
-		" free_cma:%lu\n",
+		" free:%lu free_pcp:%lu free_cma:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_ISOLATED_ANON),
@@ -3293,13 +3304,14 @@ void show_free_areas(unsigned int filter)
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
-		global_page_state(NR_FREE_PAGES),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
 		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE),
+		global_page_state(NR_FREE_PAGES),
+		free_pcp,
 		global_page_state(NR_FREE_CMA_PAGES));
 
 	for_each_populated_zone(zone) {
@@ -3307,6 +3319,11 @@ void show_free_areas(unsigned int filter)
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
+
+		free_pcp = 0;
+		for_each_online_cpu(cpu)
+			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -3333,6 +3350,8 @@ void show_free_areas(unsigned int filter)
 			" pagetables:%lukB"
 			" unstable:%lukB"
 			" bounce:%lukB"
+			" free_pcp:%lukB"
+			" local_pcp:%ukB"
 			" free_cma:%lukB"
 			" writeback_tmp:%lukB"
 			" pages_scanned:%lu"
@@ -3364,6 +3383,8 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 			K(zone_page_state(zone, NR_BOUNCE)),
+			K(free_pcp),
+			K(this_cpu_read(zone->pageset->pcp.count)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			K(zone_page_state(zone, NR_PAGES_SCANNED)),