@@ -169,6 +169,14 @@ static LIST_HEAD(pcpu_map_extend_chunks);
  */
 int pcpu_nr_empty_pop_pages;
 
+/*
+ * The number of populated pages in use by the allocator, protected by
+ * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
+ * allocated/deallocated, it is allocated/deallocated in all units of a chunk
+ * and increments/decrements this count by 1).
+ */
+static unsigned long pcpu_nr_populated;
+
 /*
  * Balance work is used to populate or destroy chunks asynchronously. We
  * try to keep the number of populated free pages between
@@ -1232,6 +1240,7 @@ static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
 
 	bitmap_set(chunk->populated, page_start, nr);
 	chunk->nr_populated += nr;
+	pcpu_nr_populated += nr;
 
 	if (!for_alloc) {
 		chunk->nr_empty_pop_pages += nr;
@@ -1260,6 +1269,7 @@ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
 	chunk->nr_populated -= nr;
 	chunk->nr_empty_pop_pages -= nr;
 	pcpu_nr_empty_pop_pages -= nr;
+	pcpu_nr_populated -= nr;
 }
 
 /*
@@ -2176,6 +2186,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
+	/* include all regions of the first chunk */
+	pcpu_nr_populated += PFN_DOWN(size_sum);
+
 	pcpu_stats_chunk_alloc();
 	trace_percpu_create_chunk(base_addr);
 
@@ -2745,6 +2758,22 @@ void __init setup_per_cpu_areas(void)
 
 #endif	/* CONFIG_SMP */
 
+/*
+ * pcpu_nr_pages - calculate total number of populated backing pages
+ *
+ * This reflects the number of pages populated to back chunks. Metadata is
+ * excluded in the number exposed in meminfo as the number of backing pages
+ * scales with the number of cpus and can quickly outweigh the memory used for
+ * metadata. It also keeps this calculation nice and simple.
+ *
+ * RETURNS:
+ * Total number of populated backing pages in use by the allocator.
+ */
+unsigned long pcpu_nr_pages(void)
+{
+	return pcpu_nr_populated * pcpu_nr_units;
+}
+
 /*
  * Percpu allocator is initialized early during boot when neither slab or
  * workqueue is available. Plug async management until everything is up
|