|
@@ -8,12 +8,13 @@
|
|
|
*
|
|
|
* This is percpu allocator which can handle both static and dynamic
|
|
|
* areas. Percpu areas are allocated in chunks in vmalloc area. Each
|
|
|
- * chunk is consisted of nr_cpu_ids units and the first chunk is used
|
|
|
- * for static percpu variables in the kernel image (special boot time
|
|
|
- * alloc/init handling necessary as these areas need to be brought up
|
|
|
- * before allocation services are running). Unit grows as necessary
|
|
|
- * and all units grow or shrink in unison. When a chunk is filled up,
|
|
|
- * another chunk is allocated. ie. in vmalloc area
|
|
|
+ * chunk consists of a boot-time determined number of units and the
|
|
|
+ * first chunk is used for static percpu variables in the kernel image
|
|
|
+ * (special boot time alloc/init handling necessary as these areas
|
|
|
+ * need to be brought up before allocation services are running).
|
|
|
+ * Unit grows as necessary and all units grow or shrink in unison.
|
|
|
+ * When a chunk is filled up, another chunk is allocated, i.e. in
|
|
|
+ * the vmalloc area.
|
|
|
*
|
|
|
* c0 c1 c2
|
|
|
* ------------------- ------------------- ------------
|
|
@@ -22,11 +23,13 @@
|
|
|
*
|
|
|
* Allocation is done in offset-size areas of single unit space. Ie,
|
|
|
* an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
|
|
|
- * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
|
|
|
- * percpu base registers pcpu_unit_size apart.
|
|
|
+ * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
|
|
|
+ * cpus. On NUMA, the mapping can be non-linear and even sparse.
|
|
|
+ * Percpu access can be done by configuring percpu base registers
|
|
|
+ * according to cpu to unit mapping and pcpu_unit_size.
|
|
|
*
|
|
|
- * There are usually many small percpu allocations many of them as
|
|
|
- * small as 4 bytes. The allocator organizes chunks into lists
|
|
|
+ * There are usually many small percpu allocations, many of them being
|
|
|
+ * as small as 4 bytes. The allocator organizes chunks into lists
|
|
|
* according to free size and tries to allocate from the fullest one.
|
|
|
* Each chunk keeps the maximum contiguous area size hint which is
|
|
|
* guaranteed to be eqaul to or larger than the maximum contiguous
|
|
@@ -43,7 +46,7 @@
|
|
|
*
|
|
|
* To use this allocator, arch code should do the followings.
|
|
|
*
|
|
|
- * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
|
|
|
+ * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
|
|
|
*
|
|
|
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
|
|
|
* regular address to percpu pointer and back if they need to be
|
|
@@ -55,7 +58,9 @@
|
|
|
|
|
|
#include <linux/bitmap.h>
|
|
|
#include <linux/bootmem.h>
|
|
|
+#include <linux/err.h>
|
|
|
#include <linux/list.h>
|
|
|
+#include <linux/log2.h>
|
|
|
#include <linux/mm.h>
|
|
|
#include <linux/module.h>
|
|
|
#include <linux/mutex.h>
|
|
@@ -89,25 +94,38 @@ struct pcpu_chunk {
|
|
|
struct list_head list; /* linked to pcpu_slot lists */
|
|
|
int free_size; /* free bytes in the chunk */
|
|
|
int contig_hint; /* max contiguous size hint */
|
|
|
- struct vm_struct *vm; /* mapped vmalloc region */
|
|
|
+ void *base_addr; /* base address of this chunk */
|
|
|
int map_used; /* # of map entries used */
|
|
|
int map_alloc; /* # of map entries allocated */
|
|
|
int *map; /* allocation map */
|
|
|
+ struct vm_struct **vms; /* mapped vmalloc regions */
|
|
|
bool immutable; /* no [de]population allowed */
|
|
|
- struct page **page; /* points to page array */
|
|
|
- struct page *page_ar[]; /* #cpus * UNIT_PAGES */
|
|
|
+ unsigned long populated[]; /* populated bitmap */
|
|
|
};
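The new populated[] member is a flexible bitmap with one bit per page of a unit; pcpu_chunk_struct_size below sizes it as BITS_TO_LONGS(pcpu_unit_pages) longs. As a concrete illustration (sizes assumed): with 4K pages and a 32K unit, pcpu_unit_pages is 8, so a single unsigned long is enough to track which of the unit's pages currently have backing.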
|
|
|
|
|
|
static int pcpu_unit_pages __read_mostly;
|
|
|
static int pcpu_unit_size __read_mostly;
|
|
|
-static int pcpu_chunk_size __read_mostly;
|
|
|
+static int pcpu_nr_units __read_mostly;
|
|
|
+static int pcpu_atom_size __read_mostly;
|
|
|
static int pcpu_nr_slots __read_mostly;
|
|
|
static size_t pcpu_chunk_struct_size __read_mostly;
|
|
|
|
|
|
+/* cpus with the lowest and highest unit numbers */
|
|
|
+static unsigned int pcpu_first_unit_cpu __read_mostly;
|
|
|
+static unsigned int pcpu_last_unit_cpu __read_mostly;
|
|
|
+
|
|
|
/* the address of the first chunk which starts with the kernel static area */
|
|
|
void *pcpu_base_addr __read_mostly;
|
|
|
EXPORT_SYMBOL_GPL(pcpu_base_addr);
|
|
|
|
|
|
+static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
|
|
|
+const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
|
|
|
+
|
|
|
+/* group information, used for vm allocation */
|
|
|
+static int pcpu_nr_groups __read_mostly;
|
|
|
+static const unsigned long *pcpu_group_offsets __read_mostly;
|
|
|
+static const size_t *pcpu_group_sizes __read_mostly;
|
|
|
+
|
|
|
/*
|
|
|
* The first chunk which always exists. Note that unlike other
|
|
|
* chunks, this one can be allocated and mapped in several different
|
|
@@ -129,9 +147,9 @@ static int pcpu_reserved_chunk_limit;
|
|
|
* Synchronization rules.
|
|
|
*
|
|
|
* There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
|
|
|
- * protects allocation/reclaim paths, chunks and chunk->page arrays.
|
|
|
- * The latter is a spinlock and protects the index data structures -
|
|
|
- * chunk slots, chunks and area maps in chunks.
|
|
|
+ * protects allocation/reclaim paths, chunks, populated bitmap and
|
|
|
+ * vmalloc mapping. The latter is a spinlock and protects the index
|
|
|
+ * data structures - chunk slots, chunks and area maps in chunks.
|
|
|
*
|
|
|
* During allocation, pcpu_alloc_mutex is kept locked all the time and
|
|
|
* pcpu_lock is grabbed and released as necessary. All actual memory
|
|
@@ -178,31 +196,23 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
|
|
|
|
|
|
static int pcpu_page_idx(unsigned int cpu, int page_idx)
|
|
|
{
|
|
|
- return cpu * pcpu_unit_pages + page_idx;
|
|
|
-}
|
|
|
-
|
|
|
-static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
|
|
|
- unsigned int cpu, int page_idx)
|
|
|
-{
|
|
|
- return &chunk->page[pcpu_page_idx(cpu, page_idx)];
|
|
|
+ return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
|
|
|
}
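A quick worked example of the new indexing (values assumed): with pcpu_unit_map[] mapping cpu 2 to unit 1 and pcpu_unit_pages = 8, pcpu_page_idx(2, 3) is 1 * 8 + 3 = 11, i.e. the temporary pages array used by the population code below is laid out unit by unit rather than cpu by cpu.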
|
|
|
|
|
|
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
|
|
|
unsigned int cpu, int page_idx)
|
|
|
{
|
|
|
- return (unsigned long)chunk->vm->addr +
|
|
|
- (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
|
|
|
+ return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
|
|
|
+ (page_idx << PAGE_SHIFT);
|
|
|
}
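Illustrative arithmetic (addresses assumed): with chunk->base_addr = 0xf1000000, pcpu_unit_offsets[cpu] = 0x8000 and page_idx = 2 on a 4K-page system, the helper returns 0xf1000000 + 0x8000 + (2 << 12) = 0xf100a000; the per-cpu unit offset replaces the old cpu * pcpu_unit_size calculation so sparse NUMA cpu-to-unit mappings still resolve correctly.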
|
|
|
|
|
|
-static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
|
|
|
- int page_idx)
|
|
|
+static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
|
|
|
+ unsigned int cpu, int page_idx)
|
|
|
{
|
|
|
- /*
|
|
|
- * Any possible cpu id can be used here, so there's no need to
|
|
|
- * worry about preemption or cpu hotplug.
|
|
|
- */
|
|
|
- return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
|
|
|
- page_idx) != NULL;
|
|
|
+ /* must not be used on pre-mapped chunk */
|
|
|
+ WARN_ON(chunk->immutable);
|
|
|
+
|
|
|
+ return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
|
|
|
}
|
|
|
|
|
|
/* set the pointer to a chunk in a page struct */
|
|
@@ -217,6 +227,34 @@ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
|
|
|
return (struct pcpu_chunk *)page->index;
|
|
|
}
|
|
|
|
|
|
+static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
|
|
|
+{
|
|
|
+ *rs = find_next_zero_bit(chunk->populated, end, *rs);
|
|
|
+ *re = find_next_bit(chunk->populated, end, *rs + 1);
|
|
|
+}
|
|
|
+
|
|
|
+static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
|
|
|
+{
|
|
|
+ *rs = find_next_bit(chunk->populated, end, *rs);
|
|
|
+ *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * (Un)populated page region iterators. Iterate over (un)populated
|
|
|
+ * page regions between @start and @end in @chunk. @rs and @re should
|
|
|
+ * be integer variables and will be set to start and end page index of
|
|
|
+ * the current region.
|
|
|
+ */
|
|
|
+#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
|
|
|
+ for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
|
|
|
+ (rs) < (re); \
|
|
|
+ (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
|
|
|
+
|
|
|
+#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
|
|
|
+ for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
|
|
|
+ (rs) < (re); \
|
|
|
+ (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
|
|
|
+
|
|
|
/**
|
|
|
* pcpu_mem_alloc - allocate memory
|
|
|
* @size: bytes to allocate
|
|
@@ -292,10 +330,10 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
|
|
|
*/
|
|
|
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
|
|
|
{
|
|
|
- void *first_start = pcpu_first_chunk->vm->addr;
|
|
|
+ void *first_start = pcpu_first_chunk->base_addr;
|
|
|
|
|
|
/* is it in the first chunk? */
|
|
|
- if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
|
|
|
+ if (addr >= first_start && addr < first_start + pcpu_unit_size) {
|
|
|
/* is it in the reserved area? */
|
|
|
if (addr < first_start + pcpu_reserved_chunk_limit)
|
|
|
return pcpu_reserved_chunk;
|
|
@@ -309,7 +347,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
|
|
|
* space. Note that any possible cpu id can be used here, so
|
|
|
* there's no need to worry about preemption or cpu hotplug.
|
|
|
*/
|
|
|
- addr += raw_smp_processor_id() * pcpu_unit_size;
|
|
|
+ addr += pcpu_unit_offsets[raw_smp_processor_id()];
|
|
|
return pcpu_get_page_chunk(vmalloc_to_page(addr));
|
|
|
}
|
|
|
|
|
@@ -558,125 +596,327 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * pcpu_unmap - unmap pages out of a pcpu_chunk
|
|
|
+ * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
|
|
|
* @chunk: chunk of interest
|
|
|
- * @page_start: page index of the first page to unmap
|
|
|
- * @page_end: page index of the last page to unmap + 1
|
|
|
- * @flush_tlb: whether to flush tlb or not
|
|
|
+ * @bitmapp: output parameter for bitmap
|
|
|
+ * @may_alloc: may allocate the array
|
|
|
*
|
|
|
- * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
|
|
|
- * If @flush is true, vcache is flushed before unmapping and tlb
|
|
|
- * after.
|
|
|
+ * Returns pointer to array of pointers to struct page and bitmap,
|
|
|
+ * both of which can be indexed with pcpu_page_idx(). The returned
|
|
|
+ * array is cleared to zero and *@bitmapp is copied from
|
|
|
+ * @chunk->populated. Note that there is only one array and bitmap
|
|
|
+ * and access exclusion is the caller's responsibility.
|
|
|
+ *
|
|
|
+ * CONTEXT:
|
|
|
+ * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
|
|
|
+ * Otherwise, don't care.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * Pointer to temp pages array on success, NULL on failure.
|
|
|
*/
|
|
|
-static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
|
|
|
- bool flush_tlb)
|
|
|
+static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
|
|
|
+ unsigned long **bitmapp,
|
|
|
+ bool may_alloc)
|
|
|
{
|
|
|
- unsigned int last = nr_cpu_ids - 1;
|
|
|
- unsigned int cpu;
|
|
|
+ static struct page **pages;
|
|
|
+ static unsigned long *bitmap;
|
|
|
+ size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
|
|
|
+ size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
|
|
|
+ sizeof(unsigned long);
|
|
|
+
|
|
|
+ if (!pages || !bitmap) {
|
|
|
+ if (may_alloc && !pages)
|
|
|
+ pages = pcpu_mem_alloc(pages_size);
|
|
|
+ if (may_alloc && !bitmap)
|
|
|
+ bitmap = pcpu_mem_alloc(bitmap_size);
|
|
|
+ if (!pages || !bitmap)
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
|
|
|
- /* unmap must not be done on immutable chunk */
|
|
|
- WARN_ON(chunk->immutable);
|
|
|
+ memset(pages, 0, pages_size);
|
|
|
+ bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
|
|
|
|
|
|
- /*
|
|
|
- * Each flushing trial can be very expensive, issue flush on
|
|
|
- * the whole region at once rather than doing it for each cpu.
|
|
|
- * This could be an overkill but is more scalable.
|
|
|
- */
|
|
|
- flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
|
|
|
- pcpu_chunk_addr(chunk, last, page_end));
|
|
|
+ *bitmapp = bitmap;
|
|
|
+ return pages;
|
|
|
+}
|
|
|
|
|
|
- for_each_possible_cpu(cpu)
|
|
|
- unmap_kernel_range_noflush(
|
|
|
- pcpu_chunk_addr(chunk, cpu, page_start),
|
|
|
- (page_end - page_start) << PAGE_SHIFT);
|
|
|
-
|
|
|
- /* ditto as flush_cache_vunmap() */
|
|
|
- if (flush_tlb)
|
|
|
- flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
|
|
|
- pcpu_chunk_addr(chunk, last, page_end));
|
|
|
+/**
|
|
|
+ * pcpu_free_pages - free pages which were allocated for @chunk
|
|
|
+ * @chunk: chunk pages were allocated for
|
|
|
+ * @pages: array of pages to be freed, indexed by pcpu_page_idx()
|
|
|
+ * @populated: populated bitmap
|
|
|
+ * @page_start: page index of the first page to be freed
|
|
|
+ * @page_end: page index of the last page to be freed + 1
|
|
|
+ *
|
|
|
+ * Free pages [@page_start,@page_end) in @pages for all units.
|
|
|
+ * The pages were allocated for @chunk.
|
|
|
+ */
|
|
|
+static void pcpu_free_pages(struct pcpu_chunk *chunk,
|
|
|
+ struct page **pages, unsigned long *populated,
|
|
|
+ int page_start, int page_end)
|
|
|
+{
|
|
|
+ unsigned int cpu;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ for (i = page_start; i < page_end; i++) {
|
|
|
+ struct page *page = pages[pcpu_page_idx(cpu, i)];
|
|
|
+
|
|
|
+ if (page)
|
|
|
+ __free_page(page);
|
|
|
+ }
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
|
|
|
- * @chunk: chunk to depopulate
|
|
|
- * @off: offset to the area to depopulate
|
|
|
- * @size: size of the area to depopulate in bytes
|
|
|
- * @flush: whether to flush cache and tlb or not
|
|
|
- *
|
|
|
- * For each cpu, depopulate and unmap pages [@page_start,@page_end)
|
|
|
- * from @chunk. If @flush is true, vcache is flushed before unmapping
|
|
|
- * and tlb after.
|
|
|
- *
|
|
|
- * CONTEXT:
|
|
|
- * pcpu_alloc_mutex.
|
|
|
+ * pcpu_alloc_pages - allocates pages for @chunk
|
|
|
+ * @chunk: target chunk
|
|
|
+ * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
|
|
|
+ * @populated: populated bitmap
|
|
|
+ * @page_start: page index of the first page to be allocated
|
|
|
+ * @page_end: page index of the last page to be allocated + 1
|
|
|
+ *
|
|
|
+ * Allocate pages [@page_start,@page_end) into @pages for all units.
|
|
|
+ * The allocation is for @chunk. Percpu core doesn't care about the
|
|
|
+ * content of @pages and will pass it verbatim to pcpu_map_pages().
|
|
|
*/
|
|
|
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
|
|
|
- bool flush)
|
|
|
+static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
|
|
|
+ struct page **pages, unsigned long *populated,
|
|
|
+ int page_start, int page_end)
|
|
|
{
|
|
|
- int page_start = PFN_DOWN(off);
|
|
|
- int page_end = PFN_UP(off + size);
|
|
|
- int unmap_start = -1;
|
|
|
- int uninitialized_var(unmap_end);
|
|
|
+ const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
|
|
|
unsigned int cpu;
|
|
|
int i;
|
|
|
|
|
|
- for (i = page_start; i < page_end; i++) {
|
|
|
- for_each_possible_cpu(cpu) {
|
|
|
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ for (i = page_start; i < page_end; i++) {
|
|
|
+ struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
|
|
|
+
|
|
|
+ *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
|
|
|
+ if (!*pagep) {
|
|
|
+ pcpu_free_pages(chunk, pages, populated,
|
|
|
+ page_start, page_end);
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+}
|
|
|
|
|
|
- if (!*pagep)
|
|
|
- continue;
|
|
|
+/**
|
|
|
+ * pcpu_pre_unmap_flush - flush cache prior to unmapping
|
|
|
+ * @chunk: chunk the regions to be flushed belong to
|
|
|
+ * @page_start: page index of the first page to be flushed
|
|
|
+ * @page_end: page index of the last page to be flushed + 1
|
|
|
+ *
|
|
|
+ * Pages in [@page_start,@page_end) of @chunk are about to be
|
|
|
+ * unmapped. Flush cache. As each flushing trial can be very
|
|
|
+ * expensive, issue flush on the whole region at once rather than
|
|
|
+ * doing it for each cpu. This could be an overkill but is more
|
|
|
+ * scalable.
|
|
|
+ */
|
|
|
+static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
|
|
|
+ int page_start, int page_end)
|
|
|
+{
|
|
|
+ flush_cache_vunmap(
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
|
|
|
+}
|
|
|
+
|
|
|
+static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
|
|
|
+{
|
|
|
+ unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
|
|
|
+}
|
|
|
|
|
|
- __free_page(*pagep);
|
|
|
+/**
|
|
|
+ * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
|
|
|
+ * @chunk: chunk of interest
|
|
|
+ * @pages: pages array which can be used to pass information to free
|
|
|
+ * @populated: populated bitmap
|
|
|
+ * @page_start: page index of the first page to unmap
|
|
|
+ * @page_end: page index of the last page to unmap + 1
|
|
|
+ *
|
|
|
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
|
|
|
+ * Corresponding elements in @pages were cleared by the caller and can
|
|
|
+ * be used to carry information to pcpu_free_pages() which will be
|
|
|
+ * called after all unmaps are finished. The caller should call
|
|
|
+ * proper pre/post flush functions.
|
|
|
+ */
|
|
|
+static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
|
|
|
+ struct page **pages, unsigned long *populated,
|
|
|
+ int page_start, int page_end)
|
|
|
+{
|
|
|
+ unsigned int cpu;
|
|
|
+ int i;
|
|
|
|
|
|
- /*
|
|
|
- * If it's partial depopulation, it might get
|
|
|
- * populated or depopulated again. Mark the
|
|
|
- * page gone.
|
|
|
- */
|
|
|
- *pagep = NULL;
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ for (i = page_start; i < page_end; i++) {
|
|
|
+ struct page *page;
|
|
|
|
|
|
- unmap_start = unmap_start < 0 ? i : unmap_start;
|
|
|
- unmap_end = i + 1;
|
|
|
+ page = pcpu_chunk_page(chunk, cpu, i);
|
|
|
+ WARN_ON(!page);
|
|
|
+ pages[pcpu_page_idx(cpu, i)] = page;
|
|
|
}
|
|
|
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
|
|
|
+ page_end - page_start);
|
|
|
}
|
|
|
|
|
|
- if (unmap_start >= 0)
|
|
|
- pcpu_unmap(chunk, unmap_start, unmap_end, flush);
|
|
|
+ for (i = page_start; i < page_end; i++)
|
|
|
+ __clear_bit(i, populated);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
|
|
|
+ * @chunk: pcpu_chunk the regions to be flushed belong to
|
|
|
+ * @page_start: page index of the first page to be flushed
|
|
|
+ * @page_end: page index of the last page to be flushed + 1
|
|
|
+ *
|
|
|
+ * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
|
|
|
+ * TLB for the regions. This can be skipped if the area is to be
|
|
|
+ * returned to vmalloc as vmalloc will handle TLB flushing lazily.
|
|
|
+ *
|
|
|
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
|
|
|
+ * for the whole region.
|
|
|
+ */
|
|
|
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
|
|
|
+ int page_start, int page_end)
|
|
|
+{
|
|
|
+ flush_tlb_kernel_range(
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
|
|
|
+}
|
|
|
+
|
|
|
+static int __pcpu_map_pages(unsigned long addr, struct page **pages,
|
|
|
+ int nr_pages)
|
|
|
+{
|
|
|
+ return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
|
|
|
+ PAGE_KERNEL, pages);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * pcpu_map - map pages into a pcpu_chunk
|
|
|
+ * pcpu_map_pages - map pages into a pcpu_chunk
|
|
|
* @chunk: chunk of interest
|
|
|
+ * @pages: pages array containing pages to be mapped
|
|
|
+ * @populated: populated bitmap
|
|
|
* @page_start: page index of the first page to map
|
|
|
* @page_end: page index of the last page to map + 1
|
|
|
*
|
|
|
- * For each cpu, map pages [@page_start,@page_end) into @chunk.
|
|
|
- * vcache is flushed afterwards.
|
|
|
+ * For each cpu, map pages [@page_start,@page_end) into @chunk. The
|
|
|
+ * caller is responsible for calling pcpu_post_map_flush() after all
|
|
|
+ * mappings are complete.
|
|
|
+ *
|
|
|
+ * This function is responsible for setting corresponding bits in
|
|
|
+ * @chunk->populated bitmap and whatever is necessary for reverse
|
|
|
+ * lookup (addr -> chunk).
|
|
|
*/
|
|
|
-static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
|
|
|
+static int pcpu_map_pages(struct pcpu_chunk *chunk,
|
|
|
+ struct page **pages, unsigned long *populated,
|
|
|
+ int page_start, int page_end)
|
|
|
{
|
|
|
- unsigned int last = nr_cpu_ids - 1;
|
|
|
- unsigned int cpu;
|
|
|
- int err;
|
|
|
-
|
|
|
- /* map must not be done on immutable chunk */
|
|
|
- WARN_ON(chunk->immutable);
|
|
|
+ unsigned int cpu, tcpu;
|
|
|
+ int i, err;
|
|
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
- err = map_kernel_range_noflush(
|
|
|
- pcpu_chunk_addr(chunk, cpu, page_start),
|
|
|
- (page_end - page_start) << PAGE_SHIFT,
|
|
|
- PAGE_KERNEL,
|
|
|
- pcpu_chunk_pagep(chunk, cpu, page_start));
|
|
|
+ err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
|
|
|
+ &pages[pcpu_page_idx(cpu, page_start)],
|
|
|
+ page_end - page_start);
|
|
|
if (err < 0)
|
|
|
- return err;
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* mapping successful, link chunk and mark populated */
|
|
|
+ for (i = page_start; i < page_end; i++) {
|
|
|
+ for_each_possible_cpu(cpu)
|
|
|
+ pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
|
|
|
+ chunk);
|
|
|
+ __set_bit(i, populated);
|
|
|
}
|
|
|
|
|
|
- /* flush at once, please read comments in pcpu_unmap() */
|
|
|
- flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
|
|
|
- pcpu_chunk_addr(chunk, last, page_end));
|
|
|
return 0;
|
|
|
+
|
|
|
+err:
|
|
|
+ for_each_possible_cpu(tcpu) {
|
|
|
+ if (tcpu == cpu)
|
|
|
+ break;
|
|
|
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
|
|
|
+ page_end - page_start);
|
|
|
+ }
|
|
|
+ return err;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_post_map_flush - flush cache after mapping
|
|
|
+ * @chunk: pcpu_chunk the regions to be flushed belong to
|
|
|
+ * @page_start: page index of the first page to be flushed
|
|
|
+ * @page_end: page index of the last page to be flushed + 1
|
|
|
+ *
|
|
|
+ * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
|
|
|
+ * cache.
|
|
|
+ *
|
|
|
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
|
|
|
+ * for the whole region.
|
|
|
+ */
|
|
|
+static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
|
|
|
+ int page_start, int page_end)
|
|
|
+{
|
|
|
+ flush_cache_vmap(
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
|
|
|
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
|
|
|
+ * @chunk: chunk to depopulate
|
|
|
+ * @off: offset to the area to depopulate
|
|
|
+ * @size: size of the area to depopulate in bytes
|
|
|
+ * @flush: whether to flush cache and tlb or not
|
|
|
+ *
|
|
|
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
|
|
|
+ * from @chunk. If @flush is true, vcache is flushed before unmapping
|
|
|
+ * and tlb after.
|
|
|
+ *
|
|
|
+ * CONTEXT:
|
|
|
+ * pcpu_alloc_mutex.
|
|
|
+ */
|
|
|
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
|
|
|
+{
|
|
|
+ int page_start = PFN_DOWN(off);
|
|
|
+ int page_end = PFN_UP(off + size);
|
|
|
+ struct page **pages;
|
|
|
+ unsigned long *populated;
|
|
|
+ int rs, re;
|
|
|
+
|
|
|
+ /* quick path, check whether it's empty already */
|
|
|
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
|
|
|
+ if (rs == page_start && re == page_end)
|
|
|
+ return;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* immutable chunks can't be depopulated */
|
|
|
+ WARN_ON(chunk->immutable);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If control reaches here, there must have been at least one
|
|
|
+ * successful population attempt so the temp pages array must
|
|
|
+ * be available now.
|
|
|
+ */
|
|
|
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
|
|
|
+ BUG_ON(!pages);
|
|
|
+
|
|
|
+ /* unmap and free */
|
|
|
+ pcpu_pre_unmap_flush(chunk, page_start, page_end);
|
|
|
+
|
|
|
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
|
|
|
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
|
|
|
+
|
|
|
+ /* no need to flush tlb, vmalloc will handle it lazily */
|
|
|
+
|
|
|
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
|
|
|
+ pcpu_free_pages(chunk, pages, populated, rs, re);
|
|
|
+
|
|
|
+ /* commit new bitmap */
|
|
|
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -693,58 +933,68 @@ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
|
|
|
*/
|
|
|
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
|
|
|
{
|
|
|
- const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
|
|
|
int page_start = PFN_DOWN(off);
|
|
|
int page_end = PFN_UP(off + size);
|
|
|
- int map_start = -1;
|
|
|
- int uninitialized_var(map_end);
|
|
|
+ int free_end = page_start, unmap_end = page_start;
|
|
|
+ struct page **pages;
|
|
|
+ unsigned long *populated;
|
|
|
unsigned int cpu;
|
|
|
- int i;
|
|
|
+ int rs, re, rc;
|
|
|
|
|
|
- for (i = page_start; i < page_end; i++) {
|
|
|
- if (pcpu_chunk_page_occupied(chunk, i)) {
|
|
|
- if (map_start >= 0) {
|
|
|
- if (pcpu_map(chunk, map_start, map_end))
|
|
|
- goto err;
|
|
|
- map_start = -1;
|
|
|
- }
|
|
|
- continue;
|
|
|
- }
|
|
|
+ /* quick path, check whether all pages are already there */
|
|
|
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
|
|
|
+ if (rs == page_start && re == page_end)
|
|
|
+ goto clear;
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- map_start = map_start < 0 ? i : map_start;
|
|
|
- map_end = i + 1;
|
|
|
+ /* need to allocate and map pages, this chunk can't be immutable */
|
|
|
+ WARN_ON(chunk->immutable);
|
|
|
|
|
|
- for_each_possible_cpu(cpu) {
|
|
|
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
|
|
|
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
|
|
|
+ if (!pages)
|
|
|
+ return -ENOMEM;
|
|
|
|
|
|
- *pagep = alloc_pages_node(cpu_to_node(cpu),
|
|
|
- alloc_mask, 0);
|
|
|
- if (!*pagep)
|
|
|
- goto err;
|
|
|
- pcpu_set_page_chunk(*pagep, chunk);
|
|
|
- }
|
|
|
+ /* alloc and map */
|
|
|
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
|
|
|
+ rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
|
|
|
+ if (rc)
|
|
|
+ goto err_free;
|
|
|
+ free_end = re;
|
|
|
}
|
|
|
|
|
|
- if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
|
|
|
- goto err;
|
|
|
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
|
|
|
+ rc = pcpu_map_pages(chunk, pages, populated, rs, re);
|
|
|
+ if (rc)
|
|
|
+ goto err_unmap;
|
|
|
+ unmap_end = re;
|
|
|
+ }
|
|
|
+ pcpu_post_map_flush(chunk, page_start, page_end);
|
|
|
|
|
|
+ /* commit new bitmap */
|
|
|
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
|
|
|
+clear:
|
|
|
for_each_possible_cpu(cpu)
|
|
|
- memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
|
|
|
- size);
|
|
|
-
|
|
|
+ memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
|
|
|
return 0;
|
|
|
-err:
|
|
|
- /* likely under heavy memory pressure, give memory back */
|
|
|
- pcpu_depopulate_chunk(chunk, off, size, true);
|
|
|
- return -ENOMEM;
|
|
|
+
|
|
|
+err_unmap:
|
|
|
+ pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
|
|
|
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
|
|
|
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
|
|
|
+ pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
|
|
|
+err_free:
|
|
|
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
|
|
|
+ pcpu_free_pages(chunk, pages, populated, rs, re);
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
|
|
|
{
|
|
|
if (!chunk)
|
|
|
return;
|
|
|
- if (chunk->vm)
|
|
|
- free_vm_area(chunk->vm);
|
|
|
+ if (chunk->vms)
|
|
|
+ pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
|
|
|
pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
|
|
|
kfree(chunk);
|
|
|
}
|
|
@@ -760,10 +1010,11 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
|
|
|
chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
|
|
|
chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
|
|
|
chunk->map[chunk->map_used++] = pcpu_unit_size;
|
|
|
- chunk->page = chunk->page_ar;
|
|
|
|
|
|
- chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
|
|
|
- if (!chunk->vm) {
|
|
|
+ chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
|
|
|
+ pcpu_nr_groups, pcpu_atom_size,
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!chunk->vms) {
|
|
|
free_pcpu_chunk(chunk);
|
|
|
return NULL;
|
|
|
}
|
|
@@ -771,6 +1022,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
|
|
|
INIT_LIST_HEAD(&chunk->list);
|
|
|
chunk->free_size = pcpu_unit_size;
|
|
|
chunk->contig_hint = pcpu_unit_size;
|
|
|
+ chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
|
|
|
|
|
|
return chunk;
|
|
|
}
|
|
@@ -860,7 +1112,8 @@ area_found:
|
|
|
|
|
|
mutex_unlock(&pcpu_alloc_mutex);
|
|
|
|
|
|
- return __addr_to_pcpu_ptr(chunk->vm->addr + off);
|
|
|
+ /* return address relative to base address */
|
|
|
+ return __addr_to_pcpu_ptr(chunk->base_addr + off);
|
|
|
|
|
|
fail_unlock:
|
|
|
spin_unlock_irq(&pcpu_lock);
|
|
@@ -938,12 +1191,13 @@ static void pcpu_reclaim(struct work_struct *work)
|
|
|
}
|
|
|
|
|
|
spin_unlock_irq(&pcpu_lock);
|
|
|
- mutex_unlock(&pcpu_alloc_mutex);
|
|
|
|
|
|
list_for_each_entry_safe(chunk, next, &todo, list) {
|
|
|
- pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
|
|
|
+ pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
|
|
|
free_pcpu_chunk(chunk);
|
|
|
}
|
|
|
+
|
|
|
+ mutex_unlock(&pcpu_alloc_mutex);
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -968,7 +1222,7 @@ void free_percpu(void *ptr)
|
|
|
spin_lock_irqsave(&pcpu_lock, flags);
|
|
|
|
|
|
chunk = pcpu_chunk_addr_search(addr);
|
|
|
- off = addr - chunk->vm->addr;
|
|
|
+ off = addr - chunk->base_addr;
|
|
|
|
|
|
pcpu_free_area(chunk, off);
|
|
|
|
|
@@ -987,30 +1241,295 @@ void free_percpu(void *ptr)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(free_percpu);
|
|
|
|
|
|
+static inline size_t pcpu_calc_fc_sizes(size_t static_size,
|
|
|
+ size_t reserved_size,
|
|
|
+ ssize_t *dyn_sizep)
|
|
|
+{
|
|
|
+ size_t size_sum;
|
|
|
+
|
|
|
+ size_sum = PFN_ALIGN(static_size + reserved_size +
|
|
|
+ (*dyn_sizep >= 0 ? *dyn_sizep : 0));
|
|
|
+ if (*dyn_sizep != 0)
|
|
|
+ *dyn_sizep = size_sum - static_size - reserved_size;
|
|
|
+
|
|
|
+ return size_sum;
|
|
|
+}
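A worked example with assumed sizes and 4K pages: static_size = 45000, reserved_size = 8192 and *dyn_sizep = 20480 sum to 73672, which PFN_ALIGN rounds up to 73728; *dyn_sizep is then rewritten to 73728 - 45000 - 8192 = 20536, so the page-alignment slack is handed to the dynamic area rather than wasted.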
|
|
|
+
|
|
|
/**
|
|
|
- * pcpu_setup_first_chunk - initialize the first percpu chunk
|
|
|
- * @get_page_fn: callback to fetch page pointer
|
|
|
- * @static_size: the size of static percpu area in bytes
|
|
|
+ * pcpu_alloc_alloc_info - allocate percpu allocation info
|
|
|
+ * @nr_groups: the number of groups
|
|
|
+ * @nr_units: the number of units
|
|
|
+ *
|
|
|
+ * Allocate ai which is large enough for @nr_groups groups containing
|
|
|
+ * @nr_units units. The returned ai's groups[0].cpu_map points to the
|
|
|
+ * cpu_map array which is long enough for @nr_units and filled with
|
|
|
+ * NR_CPUS. It's the caller's responsibility to initialize cpu_map
|
|
|
+ * pointers of the other groups.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * Pointer to the allocated pcpu_alloc_info on success, NULL on
|
|
|
+ * failure.
|
|
|
+ */
|
|
|
+struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
|
|
|
+ int nr_units)
|
|
|
+{
|
|
|
+ struct pcpu_alloc_info *ai;
|
|
|
+ size_t base_size, ai_size;
|
|
|
+ void *ptr;
|
|
|
+ int unit;
|
|
|
+
|
|
|
+ base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
|
|
|
+ __alignof__(ai->groups[0].cpu_map[0]));
|
|
|
+ ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
|
|
|
+
|
|
|
+ ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
|
|
|
+ if (!ptr)
|
|
|
+ return NULL;
|
|
|
+ ai = ptr;
|
|
|
+ ptr += base_size;
|
|
|
+
|
|
|
+ ai->groups[0].cpu_map = ptr;
|
|
|
+
|
|
|
+ for (unit = 0; unit < nr_units; unit++)
|
|
|
+ ai->groups[0].cpu_map[unit] = NR_CPUS;
|
|
|
+
|
|
|
+ ai->nr_groups = nr_groups;
|
|
|
+ ai->__ai_size = PFN_ALIGN(ai_size);
|
|
|
+
|
|
|
+ return ai;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_free_alloc_info - free percpu allocation info
|
|
|
+ * @ai: pcpu_alloc_info to free
|
|
|
+ *
|
|
|
+ * Free @ai which was allocated by pcpu_alloc_alloc_info().
|
|
|
+ */
|
|
|
+void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
|
|
|
+{
|
|
|
+ free_bootmem(__pa(ai), ai->__ai_size);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
|
|
|
* @reserved_size: the size of reserved percpu area in bytes
|
|
|
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
|
|
|
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
|
|
|
- * @base_addr: mapped address, NULL for auto
|
|
|
- * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
|
|
|
+ * @atom_size: allocation atom size
|
|
|
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
|
|
|
+ *
|
|
|
+ * This function determines grouping of units, their mappings to cpus
|
|
|
+ * and other parameters considering needed percpu size, allocation
|
|
|
+ * atom size and distances between CPUs.
|
|
|
+ *
|
|
|
+ * Groups are always mutliples of atom size and CPUs which are of
|
|
|
+ * LOCAL_DISTANCE both ways are grouped together and share space for
|
|
|
+ * units in the same group. The returned configuration is guaranteed
|
|
|
+ * to have CPUs on different nodes on different groups and >=75% usage
|
|
|
+ * of allocated virtual address space.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * On success, pointer to the new allocation_info is returned. On
|
|
|
+ * failure, ERR_PTR value is returned.
|
|
|
+ */
|
|
|
+struct pcpu_alloc_info * __init pcpu_build_alloc_info(
|
|
|
+ size_t reserved_size, ssize_t dyn_size,
|
|
|
+ size_t atom_size,
|
|
|
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
|
|
|
+{
|
|
|
+ static int group_map[NR_CPUS] __initdata;
|
|
|
+ static int group_cnt[NR_CPUS] __initdata;
|
|
|
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
|
|
|
+ int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
|
|
|
+ size_t size_sum, min_unit_size, alloc_size;
|
|
|
+ int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
|
|
|
+ int last_allocs, group, unit;
|
|
|
+ unsigned int cpu, tcpu;
|
|
|
+ struct pcpu_alloc_info *ai;
|
|
|
+ unsigned int *cpu_map;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Determine min_unit_size, alloc_size and max_upa such that
|
|
|
+ * alloc_size is multiple of atom_size and is the smallest
|
|
|
+ * which can accomodate 4k aligned segments which are equal to
|
|
|
+ * or larger than min_unit_size.
|
|
|
+ */
|
|
|
+ size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
|
|
|
+ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
|
|
|
+
|
|
|
+ alloc_size = roundup(min_unit_size, atom_size);
|
|
|
+ upa = alloc_size / min_unit_size;
|
|
|
+ while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
|
|
|
+ upa--;
|
|
|
+ max_upa = upa;
|
|
|
+
|
|
|
+ /* group cpus according to their proximity */
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ group = 0;
|
|
|
+ next_group:
|
|
|
+ for_each_possible_cpu(tcpu) {
|
|
|
+ if (cpu == tcpu)
|
|
|
+ break;
|
|
|
+ if (group_map[tcpu] == group && cpu_distance_fn &&
|
|
|
+ (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
|
|
|
+ cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
|
|
|
+ group++;
|
|
|
+ nr_groups = max(nr_groups, group + 1);
|
|
|
+ goto next_group;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ group_map[cpu] = group;
|
|
|
+ group_cnt[group]++;
|
|
|
+ group_cnt_max = max(group_cnt_max, group_cnt[group]);
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Expand unit size until address space usage goes over 75%
|
|
|
+ * and then as much as possible without using more address
|
|
|
+ * space.
|
|
|
+ */
|
|
|
+ last_allocs = INT_MAX;
|
|
|
+ for (upa = max_upa; upa; upa--) {
|
|
|
+ int allocs = 0, wasted = 0;
|
|
|
+
|
|
|
+ if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
|
|
|
+ continue;
|
|
|
+
|
|
|
+ for (group = 0; group < nr_groups; group++) {
|
|
|
+ int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
|
|
|
+ allocs += this_allocs;
|
|
|
+ wasted += this_allocs * upa - group_cnt[group];
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Don't accept if wastage is over 1/3. The
|
|
|
+ * greater-than comparison ensures upa==1 always
|
|
|
+ * passes the following check.
|
|
|
+ */
|
|
|
+ if (wasted > num_possible_cpus() / 3)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ /* and then don't consume more memory */
|
|
|
+ if (allocs > last_allocs)
|
|
|
+ break;
|
|
|
+ last_allocs = allocs;
|
|
|
+ best_upa = upa;
|
|
|
+ }
|
|
|
+ upa = best_upa;
|
|
|
+
|
|
|
+ /* allocate and fill alloc_info */
|
|
|
+ for (group = 0; group < nr_groups; group++)
|
|
|
+ nr_units += roundup(group_cnt[group], upa);
|
|
|
+
|
|
|
+ ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
|
|
|
+ if (!ai)
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+ cpu_map = ai->groups[0].cpu_map;
|
|
|
+
|
|
|
+ for (group = 0; group < nr_groups; group++) {
|
|
|
+ ai->groups[group].cpu_map = cpu_map;
|
|
|
+ cpu_map += roundup(group_cnt[group], upa);
|
|
|
+ }
|
|
|
+
|
|
|
+ ai->static_size = static_size;
|
|
|
+ ai->reserved_size = reserved_size;
|
|
|
+ ai->dyn_size = dyn_size;
|
|
|
+ ai->unit_size = alloc_size / upa;
|
|
|
+ ai->atom_size = atom_size;
|
|
|
+ ai->alloc_size = alloc_size;
|
|
|
+
|
|
|
+ for (group = 0, unit = 0; group_cnt[group]; group++) {
|
|
|
+ struct pcpu_group_info *gi = &ai->groups[group];
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Initialize base_offset as if all groups are located
|
|
|
+ * back-to-back. The caller should update this to
|
|
|
+ * reflect actual allocation.
|
|
|
+ */
|
|
|
+ gi->base_offset = unit * ai->unit_size;
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu)
|
|
|
+ if (group_map[cpu] == group)
|
|
|
+ gi->cpu_map[gi->nr_units++] = cpu;
|
|
|
+ gi->nr_units = roundup(gi->nr_units, upa);
|
|
|
+ unit += gi->nr_units;
|
|
|
+ }
|
|
|
+ BUG_ON(unit != nr_units);
|
|
|
+
|
|
|
+ return ai;
|
|
|
+}
|
|
|
+
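To make the units_per_alloc sizing above concrete (numbers assumed): with min_unit_size = 108K and atom_size = 2M, alloc_size = roundup(108K, 2M) = 2M and the starting upa is 2M / 108K = 18; 2M is divisible by neither 18 nor 17, so upa drops to 16, giving max_upa = 16 and a candidate unit_size of 2M / 16 = 128K, which is page aligned. The selection loop then also considers smaller upa values, rejecting any that waste more than a third of the possible CPUs' worth of units and stopping once the number of allocations would grow.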
|
|
|
+/**
|
|
|
+ * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
|
|
|
+ * @lvl: loglevel
|
|
|
+ * @ai: allocation info to dump
|
|
|
+ *
|
|
|
+ * Print out information about @ai using loglevel @lvl.
|
|
|
+ */
|
|
|
+static void pcpu_dump_alloc_info(const char *lvl,
|
|
|
+ const struct pcpu_alloc_info *ai)
|
|
|
+{
|
|
|
+ int group_width = 1, cpu_width = 1, width;
|
|
|
+ char empty_str[] = "--------";
|
|
|
+ int alloc = 0, alloc_end = 0;
|
|
|
+ int group, v;
|
|
|
+ int upa, apl; /* units per alloc, allocs per line */
|
|
|
+
|
|
|
+ v = ai->nr_groups;
|
|
|
+ while (v /= 10)
|
|
|
+ group_width++;
|
|
|
+
|
|
|
+ v = num_possible_cpus();
|
|
|
+ while (v /= 10)
|
|
|
+ cpu_width++;
|
|
|
+ empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
|
|
|
+
|
|
|
+ upa = ai->alloc_size / ai->unit_size;
|
|
|
+ width = upa * (cpu_width + 1) + group_width + 3;
|
|
|
+ apl = rounddown_pow_of_two(max(60 / width, 1));
|
|
|
+
|
|
|
+ printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
|
|
|
+ lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
|
|
|
+ ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
|
|
|
+
|
|
|
+ for (group = 0; group < ai->nr_groups; group++) {
|
|
|
+ const struct pcpu_group_info *gi = &ai->groups[group];
|
|
|
+ int unit = 0, unit_end = 0;
|
|
|
+
|
|
|
+ BUG_ON(gi->nr_units % upa);
|
|
|
+ for (alloc_end += gi->nr_units / upa;
|
|
|
+ alloc < alloc_end; alloc++) {
|
|
|
+ if (!(alloc % apl)) {
|
|
|
+ printk("\n");
|
|
|
+ printk("%spcpu-alloc: ", lvl);
|
|
|
+ }
|
|
|
+ printk("[%0*d] ", group_width, group);
|
|
|
+
|
|
|
+ for (unit_end += upa; unit < unit_end; unit++)
|
|
|
+ if (gi->cpu_map[unit] != NR_CPUS)
|
|
|
+ printk("%0*d ", cpu_width,
|
|
|
+ gi->cpu_map[unit]);
|
|
|
+ else
|
|
|
+ printk("%s ", empty_str);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ printk("\n");
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
|
|
|
+ * @ai: pcpu_alloc_info describing how the percpu area is shaped
|
|
|
+ * @base_addr: mapped address
|
|
|
*
|
|
|
* Initialize the first percpu chunk which contains the kernel static
|
|
|
* perpcu area. This function is to be called from arch percpu area
|
|
|
- * setup path. The first two parameters are mandatory. The rest are
|
|
|
- * optional.
|
|
|
- *
|
|
|
- * @get_page_fn() should return pointer to percpu page given cpu
|
|
|
- * number and page number. It should at least return enough pages to
|
|
|
- * cover the static area. The returned pages for static area should
|
|
|
- * have been initialized with valid data. If @unit_size is specified,
|
|
|
- * it can also return pages after the static area. NULL return
|
|
|
- * indicates end of pages for the cpu. Note that @get_page_fn() must
|
|
|
- * return the same number of pages for all cpus.
|
|
|
- *
|
|
|
- * @reserved_size, if non-zero, specifies the amount of bytes to
|
|
|
+ * setup path.
|
|
|
+ *
|
|
|
+ * @ai contains all information necessary to initialize the first
|
|
|
+ * chunk and prime the dynamic percpu allocator.
|
|
|
+ *
|
|
|
+ * @ai->static_size is the size of static percpu area.
|
|
|
+ *
|
|
|
+ * @ai->reserved_size, if non-zero, specifies the amount of bytes to
|
|
|
* reserve after the static area in the first chunk. This reserves
|
|
|
* the first chunk such that it's available only through reserved
|
|
|
* percpu allocation. This is primarily used to serve module percpu
|
|
@@ -1018,22 +1537,29 @@ EXPORT_SYMBOL_GPL(free_percpu);
|
|
|
* limited offset range for symbol relocations to guarantee module
|
|
|
* percpu symbols fall inside the relocatable range.
|
|
|
*
|
|
|
- * @dyn_size, if non-negative, determines the number of bytes
|
|
|
- * available for dynamic allocation in the first chunk. Specifying
|
|
|
- * non-negative value makes percpu leave alone the area beyond
|
|
|
- * @static_size + @reserved_size + @dyn_size.
|
|
|
+ * @ai->dyn_size determines the number of bytes available for dynamic
|
|
|
+ * allocation in the first chunk. The area between @ai->static_size +
|
|
|
+ * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
|
|
|
*
|
|
|
- * @unit_size, if non-negative, specifies unit size and must be
|
|
|
- * aligned to PAGE_SIZE and equal to or larger than @static_size +
|
|
|
- * @reserved_size + if non-negative, @dyn_size.
|
|
|
+ * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
|
|
|
+ * and equal to or larger than @ai->static_size + @ai->reserved_size +
|
|
|
+ * @ai->dyn_size.
|
|
|
*
|
|
|
- * Non-null @base_addr means that the caller already allocated virtual
|
|
|
- * region for the first chunk and mapped it. percpu must not mess
|
|
|
- * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
|
|
|
- * @populate_pte_fn doesn't make any sense.
|
|
|
+ * @ai->atom_size is the allocation atom size and used as alignment
|
|
|
+ * for vm areas.
|
|
|
*
|
|
|
- * @populate_pte_fn is used to populate the pagetable. NULL means the
|
|
|
- * caller already populated the pagetable.
|
|
|
+ * @ai->alloc_size is the allocation size and always multiple of
|
|
|
+ * @ai->atom_size. This is larger than @ai->atom_size if
|
|
|
+ * @ai->unit_size is larger than @ai->atom_size.
|
|
|
+ *
|
|
|
+ * @ai->nr_groups and @ai->groups describe virtual memory layout of
|
|
|
+ * percpu areas. Units which should be colocated are put into the
|
|
|
+ * same group. Dynamic VM areas will be allocated according to these
|
|
|
+ * groupings. If @ai->nr_groups is zero, a single group containing
|
|
|
+ * all units is assumed.
|
|
|
+ *
|
|
|
+ * The caller should have mapped the first chunk at @base_addr and
|
|
|
+ * copied static data to each unit.
|
|
|
*
|
|
|
* If the first chunk ends up with both reserved and dynamic areas, it
|
|
|
* is served by two chunks - one to serve the core static and reserved
|
|
@@ -1043,49 +1569,83 @@ EXPORT_SYMBOL_GPL(free_percpu);
|
|
|
* and available for dynamic allocation like any other chunks.
|
|
|
*
|
|
|
* RETURNS:
|
|
|
- * The determined pcpu_unit_size which can be used to initialize
|
|
|
- * percpu access.
|
|
|
+ * 0 on success, -errno on failure.
|
|
|
*/
|
|
|
-size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
|
|
|
- size_t static_size, size_t reserved_size,
|
|
|
- ssize_t dyn_size, ssize_t unit_size,
|
|
|
- void *base_addr,
|
|
|
- pcpu_populate_pte_fn_t populate_pte_fn)
|
|
|
+int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
|
|
|
+ void *base_addr)
|
|
|
{
|
|
|
- static struct vm_struct first_vm;
|
|
|
static int smap[2], dmap[2];
|
|
|
- size_t size_sum = static_size + reserved_size +
|
|
|
- (dyn_size >= 0 ? dyn_size : 0);
|
|
|
+ size_t dyn_size = ai->dyn_size;
|
|
|
+ size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
|
|
|
struct pcpu_chunk *schunk, *dchunk = NULL;
|
|
|
+ unsigned long *group_offsets;
|
|
|
+ size_t *group_sizes;
|
|
|
+ unsigned long *unit_off;
|
|
|
unsigned int cpu;
|
|
|
- int nr_pages;
|
|
|
- int err, i;
|
|
|
+ int *unit_map;
|
|
|
+ int group, unit, i;
|
|
|
|
|
|
- /* santiy checks */
|
|
|
+ /* sanity checks */
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
|
|
|
ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
|
|
|
- BUG_ON(!static_size);
|
|
|
- if (unit_size >= 0) {
|
|
|
- BUG_ON(unit_size < size_sum);
|
|
|
- BUG_ON(unit_size & ~PAGE_MASK);
|
|
|
- BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
|
|
|
- } else
|
|
|
- BUG_ON(base_addr);
|
|
|
- BUG_ON(base_addr && populate_pte_fn);
|
|
|
-
|
|
|
- if (unit_size >= 0)
|
|
|
- pcpu_unit_pages = unit_size >> PAGE_SHIFT;
|
|
|
- else
|
|
|
- pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
|
|
|
- PFN_UP(size_sum));
|
|
|
+ BUG_ON(ai->nr_groups <= 0);
|
|
|
+ BUG_ON(!ai->static_size);
|
|
|
+ BUG_ON(!base_addr);
|
|
|
+ BUG_ON(ai->unit_size < size_sum);
|
|
|
+ BUG_ON(ai->unit_size & ~PAGE_MASK);
|
|
|
+ BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
|
|
|
+
|
|
|
+ pcpu_dump_alloc_info(KERN_DEBUG, ai);
|
|
|
+
|
|
|
+ /* process group information and build config tables accordingly */
|
|
|
+ group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
|
|
|
+ group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
|
|
|
+ unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
|
|
|
+ unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
|
|
|
+
|
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
|
|
+ unit_map[cpu] = NR_CPUS;
|
|
|
+ pcpu_first_unit_cpu = NR_CPUS;
|
|
|
+
|
|
|
+ for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
|
|
|
+ const struct pcpu_group_info *gi = &ai->groups[group];
|
|
|
+
|
|
|
+ group_offsets[group] = gi->base_offset;
|
|
|
+ group_sizes[group] = gi->nr_units * ai->unit_size;
|
|
|
+
|
|
|
+ for (i = 0; i < gi->nr_units; i++) {
|
|
|
+ cpu = gi->cpu_map[i];
|
|
|
+ if (cpu == NR_CPUS)
|
|
|
+ continue;
|
|
|
|
|
|
- pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
|
|
|
- pcpu_chunk_size = nr_cpu_ids * pcpu_unit_size;
|
|
|
- pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
|
|
|
- + nr_cpu_ids * pcpu_unit_pages * sizeof(struct page *);
|
|
|
+ BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
|
|
|
+ BUG_ON(unit_map[cpu] != NR_CPUS);
|
|
|
|
|
|
- if (dyn_size < 0)
|
|
|
- dyn_size = pcpu_unit_size - static_size - reserved_size;
|
|
|
+ unit_map[cpu] = unit + i;
|
|
|
+ unit_off[cpu] = gi->base_offset + i * ai->unit_size;
|
|
|
+
|
|
|
+ if (pcpu_first_unit_cpu == NR_CPUS)
|
|
|
+ pcpu_first_unit_cpu = cpu;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ pcpu_last_unit_cpu = cpu;
|
|
|
+ pcpu_nr_units = unit;
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu)
|
|
|
+ BUG_ON(unit_map[cpu] == NR_CPUS);
|
|
|
+
|
|
|
+ pcpu_nr_groups = ai->nr_groups;
|
|
|
+ pcpu_group_offsets = group_offsets;
|
|
|
+ pcpu_group_sizes = group_sizes;
|
|
|
+ pcpu_unit_map = unit_map;
|
|
|
+ pcpu_unit_offsets = unit_off;
|
|
|
+
|
|
|
+ /* determine basic parameters */
|
|
|
+ pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
|
|
|
+ pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
|
|
|
+ pcpu_atom_size = ai->atom_size;
|
|
|
+ pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
|
|
|
+ BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
|
|
|
|
|
|
/*
|
|
|
* Allocate chunk slots. The additional last slot is for
|
|
@@ -1105,189 +1665,351 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
|
|
|
*/
|
|
|
schunk = alloc_bootmem(pcpu_chunk_struct_size);
|
|
|
INIT_LIST_HEAD(&schunk->list);
|
|
|
- schunk->vm = &first_vm;
|
|
|
+ schunk->base_addr = base_addr;
|
|
|
schunk->map = smap;
|
|
|
schunk->map_alloc = ARRAY_SIZE(smap);
|
|
|
- schunk->page = schunk->page_ar;
|
|
|
+ schunk->immutable = true;
|
|
|
+ bitmap_fill(schunk->populated, pcpu_unit_pages);
|
|
|
|
|
|
- if (reserved_size) {
|
|
|
- schunk->free_size = reserved_size;
|
|
|
+ if (ai->reserved_size) {
|
|
|
+ schunk->free_size = ai->reserved_size;
|
|
|
pcpu_reserved_chunk = schunk;
|
|
|
- pcpu_reserved_chunk_limit = static_size + reserved_size;
|
|
|
+ pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
|
|
|
} else {
|
|
|
schunk->free_size = dyn_size;
|
|
|
dyn_size = 0; /* dynamic area covered */
|
|
|
}
|
|
|
schunk->contig_hint = schunk->free_size;
|
|
|
|
|
|
- schunk->map[schunk->map_used++] = -static_size;
|
|
|
+ schunk->map[schunk->map_used++] = -ai->static_size;
|
|
|
if (schunk->free_size)
|
|
|
schunk->map[schunk->map_used++] = schunk->free_size;
|
|
|
|
|
|
/* init dynamic chunk if necessary */
|
|
|
if (dyn_size) {
|
|
|
- dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
|
|
|
+ dchunk = alloc_bootmem(pcpu_chunk_struct_size);
|
|
|
INIT_LIST_HEAD(&dchunk->list);
|
|
|
- dchunk->vm = &first_vm;
|
|
|
+ dchunk->base_addr = base_addr;
|
|
|
dchunk->map = dmap;
|
|
|
dchunk->map_alloc = ARRAY_SIZE(dmap);
|
|
|
- dchunk->page = schunk->page_ar; /* share page map with schunk */
|
|
|
+ dchunk->immutable = true;
|
|
|
+ bitmap_fill(dchunk->populated, pcpu_unit_pages);
|
|
|
|
|
|
dchunk->contig_hint = dchunk->free_size = dyn_size;
|
|
|
dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
|
|
|
dchunk->map[dchunk->map_used++] = dchunk->free_size;
|
|
|
}
|
|
|
|
|
|
- /* allocate vm address */
|
|
|
- first_vm.flags = VM_ALLOC;
|
|
|
- first_vm.size = pcpu_chunk_size;
|
|
|
-
|
|
|
- if (!base_addr)
|
|
|
- vm_area_register_early(&first_vm, PAGE_SIZE);
|
|
|
- else {
|
|
|
- /*
|
|
|
- * Pages already mapped. No need to remap into
|
|
|
- * vmalloc area. In this case the first chunks can't
|
|
|
- * be mapped or unmapped by percpu and are marked
|
|
|
- * immutable.
|
|
|
- */
|
|
|
- first_vm.addr = base_addr;
|
|
|
- schunk->immutable = true;
|
|
|
- if (dchunk)
|
|
|
- dchunk->immutable = true;
|
|
|
- }
|
|
|
-
|
|
|
- /* assign pages */
|
|
|
- nr_pages = -1;
|
|
|
- for_each_possible_cpu(cpu) {
|
|
|
- for (i = 0; i < pcpu_unit_pages; i++) {
|
|
|
- struct page *page = get_page_fn(cpu, i);
|
|
|
-
|
|
|
- if (!page)
|
|
|
- break;
|
|
|
- *pcpu_chunk_pagep(schunk, cpu, i) = page;
|
|
|
- }
|
|
|
-
|
|
|
- BUG_ON(i < PFN_UP(static_size));
|
|
|
-
|
|
|
- if (nr_pages < 0)
|
|
|
- nr_pages = i;
|
|
|
- else
|
|
|
- BUG_ON(nr_pages != i);
|
|
|
- }
|
|
|
-
|
|
|
- /* map them */
|
|
|
- if (populate_pte_fn) {
|
|
|
- for_each_possible_cpu(cpu)
|
|
|
- for (i = 0; i < nr_pages; i++)
|
|
|
- populate_pte_fn(pcpu_chunk_addr(schunk,
|
|
|
- cpu, i));
|
|
|
-
|
|
|
- err = pcpu_map(schunk, 0, nr_pages);
|
|
|
- if (err)
|
|
|
- panic("failed to setup static percpu area, err=%d\n",
|
|
|
- err);
|
|
|
- }
|
|
|
-
|
|
|
/* link the first chunk in */
|
|
|
pcpu_first_chunk = dchunk ?: schunk;
|
|
|
pcpu_chunk_relocate(pcpu_first_chunk, -1);
|
|
|
|
|
|
/* we're done */
|
|
|
- pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
|
|
|
- return pcpu_unit_size;
|
|
|
+ pcpu_base_addr = base_addr;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Embedding first chunk setup helper.
|
|
|
- */
|
|
|
-static void *pcpue_ptr __initdata;
|
|
|
-static size_t pcpue_size __initdata;
|
|
|
-static size_t pcpue_unit_size __initdata;
|
|
|
+const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
|
|
|
+ [PCPU_FC_AUTO] = "auto",
|
|
|
+ [PCPU_FC_EMBED] = "embed",
|
|
|
+ [PCPU_FC_PAGE] = "page",
|
|
|
+};
|
|
|
|
|
|
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
|
|
|
-{
|
|
|
- size_t off = (size_t)pageno << PAGE_SHIFT;
|
|
|
+enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
|
|
|
|
|
|
- if (off >= pcpue_size)
|
|
|
- return NULL;
|
|
|
+static int __init percpu_alloc_setup(char *str)
|
|
|
+{
|
|
|
+ if (0)
|
|
|
+ /* nada */;
|
|
|
+#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
|
|
|
+ else if (!strcmp(str, "embed"))
|
|
|
+ pcpu_chosen_fc = PCPU_FC_EMBED;
|
|
|
+#endif
|
|
|
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
|
|
|
+ else if (!strcmp(str, "page"))
|
|
|
+ pcpu_chosen_fc = PCPU_FC_PAGE;
|
|
|
+#endif
|
|
|
+ else
|
|
|
+ pr_warning("PERCPU: unknown allocator %s specified\n", str);
|
|
|
|
|
|
- return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
|
|
|
+ return 0;
|
|
|
}
|
|
|
+early_param("percpu_alloc", percpu_alloc_setup);
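With this hook, the first chunk allocator can be chosen from the kernel command line; for example booting with percpu_alloc=page selects the page-based first chunk when CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK is available, while an unrecognized value only triggers the warning above and leaves the default PCPU_FC_AUTO in place.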
|
|
|
|
|
|
+#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
|
|
|
+ !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
|
|
|
/**
|
|
|
* pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
|
|
|
- * @static_size: the size of static percpu area in bytes
|
|
|
* @reserved_size: the size of reserved percpu area in bytes
|
|
|
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
|
|
|
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
|
|
|
+ * @atom_size: allocation atom size
|
|
|
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
|
|
|
+ * @alloc_fn: function to allocate percpu page
|
|
|
+ * @free_fn: function to free percpu page
  *
  * This is a helper to ease setting up embedded first percpu chunk and
  * can be called where pcpu_setup_first_chunk() is expected.
  *
  * If this function is used to setup the first chunk, it is allocated
- * as a contiguous area using bootmem allocator and used as-is without
- * being mapped into vmalloc area. This enables the first chunk to
- * piggy back on the linear physical mapping which often uses larger
- * page size.
+ * by calling @alloc_fn and used as-is without being mapped into
+ * vmalloc area. Allocations are always whole multiples of @atom_size
+ * aligned to @atom_size.
+ *
+ * This enables the first chunk to piggy back on the linear physical
+ * mapping which often uses larger page size. Please note that this
+ * can result in very sparse cpu->unit mapping on NUMA machines thus
+ * requiring large vmalloc address space. Don't use this allocator if
+ * vmalloc space is not orders of magnitude larger than distances
+ * between node memory addresses (ie. 32bit NUMA machines).
  *
  * When @dyn_size is positive, dynamic area might be larger than
- * specified to fill page alignment. Also, when @dyn_size is auto,
- * @dyn_size does not fill the whole first chunk but only what's
- * necessary for page alignment after static and reserved areas.
+ * specified to fill page alignment. When @dyn_size is auto,
+ * @dyn_size is just big enough to fill page alignment after static
+ * and reserved areas.
  *
  * If the needed size is smaller than the minimum or specified unit
- * size, the leftover is returned to the bootmem allocator.
+ * size, the leftover is returned using @free_fn.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-				      ssize_t dyn_size, ssize_t unit_size)
+int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+				  size_t atom_size,
+				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
+				  pcpu_fc_alloc_fn_t alloc_fn,
+				  pcpu_fc_free_fn_t free_fn)
 {
-	size_t chunk_size;
-	unsigned int cpu;
+	void *base = (void *)ULONG_MAX;
+	void **areas = NULL;
+	struct pcpu_alloc_info *ai;
+	size_t size_sum, areas_size;
+	int group, i, rc;
+
+	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
+				   cpu_distance_fn);
+	if (IS_ERR(ai))
+		return PTR_ERR(ai);
+
+	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
+	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
+
+	areas = alloc_bootmem_nopanic(areas_size);
+	if (!areas) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
 
-	/* determine parameters and allocate */
-	pcpue_size = PFN_ALIGN(static_size + reserved_size +
-			       (dyn_size >= 0 ? dyn_size : 0));
-	if (dyn_size != 0)
-		dyn_size = pcpue_size - static_size - reserved_size;
-
-	if (unit_size >= 0) {
-		BUG_ON(unit_size < pcpue_size);
-		pcpue_unit_size = unit_size;
-	} else
-		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-
-	chunk_size = pcpue_unit_size * nr_cpu_ids;
-
-	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
-					    __pa(MAX_DMA_ADDRESS));
-	if (!pcpue_ptr) {
-		pr_warning("PERCPU: failed to allocate %zu bytes for "
-			   "embedding\n", chunk_size);
-		return -ENOMEM;
+	/* allocate, copy and determine base address */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		unsigned int cpu = NR_CPUS;
+		void *ptr;
+
+		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
+			cpu = gi->cpu_map[i];
+		BUG_ON(cpu == NR_CPUS);
+
+		/* allocate space for the whole group */
+		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
+		if (!ptr) {
+			rc = -ENOMEM;
+			goto out_free_areas;
+		}
+		areas[group] = ptr;
+
+		base = min(ptr, base);
+
+		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
+			if (gi->cpu_map[i] == NR_CPUS) {
+				/* unused unit, free whole */
+				free_fn(ptr, ai->unit_size);
+				continue;
+			}
+			/* copy and return the unused part */
+			memcpy(ptr, __per_cpu_load, ai->static_size);
+			free_fn(ptr + size_sum, ai->unit_size - size_sum);
+		}
 	}
 
-	/* return the leftover and copy */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+	/* base address is now known, determine group base offsets */
+	for (group = 0; group < ai->nr_groups; group++)
+		ai->groups[group].base_offset = areas[group] - base;
+
+	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
+		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+		ai->dyn_size, ai->unit_size);
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	goto out_free;
+
+out_free_areas:
+	for (group = 0; group < ai->nr_groups; group++)
+		free_fn(areas[group],
+			ai->groups[group].nr_units * ai->unit_size);
+out_free:
+	pcpu_free_alloc_info(ai);
+	if (areas)
+		free_bootmem(__pa(areas), areas_size);
+	return rc;
+}
+#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
+	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
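+
+/*
+ * Usage sketch: this mirrors the generic setup_per_cpu_areas() added
+ * below, which feeds the embedding helper bootmem-backed callbacks.
+ * A NUMA-aware arch might instead pass its own @cpu_distance_fn and a
+ * node-affine @alloc_fn.
+ *
+ *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ *				    NULL, pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
+ *	if (rc < 0)
+ *		panic("Failed to initialize percpu areas.");
+ */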
+
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+/**
+ * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
+ * @free_fn: function to free percpu page, always called with PAGE_SIZE
+ * @populate_pte_fn: function to populate pte
+ *
+ * This is a helper to ease setting up page-remapped first percpu
+ * chunk and can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * This is the basic allocator. Static percpu area is allocated
+ * page-by-page into vmalloc area.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init pcpu_page_first_chunk(size_t reserved_size,
+				 pcpu_fc_alloc_fn_t alloc_fn,
+				 pcpu_fc_free_fn_t free_fn,
+				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
+{
+	static struct vm_struct vm;
+	struct pcpu_alloc_info *ai;
+	char psize_str[16];
+	int unit_pages;
+	size_t pages_size;
+	struct page **pages;
+	int unit, i, j, rc;
+
+	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
+
+	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
+	if (IS_ERR(ai))
+		return PTR_ERR(ai);
+	BUG_ON(ai->nr_groups != 1);
+	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
+
+	unit_pages = ai->unit_size >> PAGE_SHIFT;
+
+	/* unaligned allocations can't be freed, round up to page size */
+	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
+			       sizeof(pages[0]));
+	pages = alloc_bootmem(pages_size);
+
+	/* allocate pages */
+	j = 0;
+	for (unit = 0; unit < num_possible_cpus(); unit++)
+		for (i = 0; i < unit_pages; i++) {
+			unsigned int cpu = ai->groups[0].cpu_map[unit];
+			void *ptr;
+
+			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
+			if (!ptr) {
+				pr_warning("PERCPU: failed to allocate %s page "
+					   "for cpu%u\n", psize_str, cpu);
+				goto enomem;
+			}
+			pages[j++] = virt_to_page(ptr);
+		}
+
+	/* allocate vm area, map the pages and copy static data */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * ai->unit_size;
+	vm_area_register_early(&vm, PAGE_SIZE);
+
+	for (unit = 0; unit < num_possible_cpus(); unit++) {
+		unsigned long unit_addr =
+			(unsigned long)vm.addr + unit * ai->unit_size;
+
+		for (i = 0; i < unit_pages; i++)
+			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
+
+		/* pte already populated, the following shouldn't fail */
+		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+				      unit_pages);
+		if (rc < 0)
+			panic("failed to map percpu area, err=%d\n", rc);
 
-		if (cpu_possible(cpu)) {
-			free_bootmem(__pa(ptr + pcpue_size),
-				     pcpue_unit_size - pcpue_size);
-			memcpy(ptr, __per_cpu_load, static_size);
-		} else
-			free_bootmem(__pa(ptr), pcpue_unit_size);
+		/*
+		 * FIXME: Archs with virtual cache should flush local
+		 * cache for the linear mapping here - something
+		 * equivalent to flush_cache_vmap() on the local cpu.
+		 * flush_cache_vmap() can't be used as most supporting
+		 * data structures are not set up yet.
+		 */
+
+		/* copy static data */
+		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
 	}
 
 	/* we're ready, commit */
-	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
+		unit_pages, psize_str, vm.addr, ai->static_size,
+		ai->reserved_size, ai->dyn_size);
+
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
+	goto out_free_ar;
+
+enomem:
+	while (--j >= 0)
+		free_fn(page_address(pages[j]), PAGE_SIZE);
+	rc = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pages), pages_size);
+	pcpu_free_alloc_info(ai);
+	return rc;
+}
+#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
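+
+/*
+ * Usage sketch (callback names below are illustrative, not defined
+ * here): the caller supplies page-granular alloc/free helpers and a
+ * pte-populating callback of its own, where pcpu_setup_first_chunk()
+ * would otherwise be used.
+ *
+ *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+ *				   my_pcpu_alloc, my_pcpu_free,
+ *				   my_populate_pte);
+ *	if (rc < 0)
+ *		panic("Failed to initialize percpu areas.");
+ */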
+
+/*
+ * Generic percpu area setup.
+ *
+ * The embedding helper is used because its behavior closely resembles
+ * the original non-dynamic generic percpu area setup. This is
+ * important because many archs have addressing restrictions and might
+ * fail if the percpu area is located far away from the previous
+ * location. As an added bonus, in non-NUMA cases, embedding is
+ * generally a good idea TLB-wise because percpu area can piggy back
+ * on the physical linear memory mapping which uses large page
+ * mappings on applicable archs.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
+					size_t align)
+{
+	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+}
 
-	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-				      reserved_size, dyn_size,
-				      pcpue_unit_size, pcpue_ptr, NULL);
+static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
+{
+	free_bootmem(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables. That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
+				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
+	if (rc < 0)
+ panic("Failed to initialized percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
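+
+/*
+ * Illustration (simplified sketch): with __per_cpu_offset[] filled in
+ * above, a per-cpu accessor boils down to adding the cpu's offset to
+ * the static variable's address in the kernel image, roughly:
+ *
+ *	ptr = (void *)&per_cpu_var + __per_cpu_offset[cpu];
+ */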