|
|
@@ -200,6 +200,11 @@ static inline int next_present_section_nr(int section_nr)
|
|
|
(section_nr <= __highest_present_section_nr)); \
|
|
|
section_nr = next_present_section_nr(section_nr))
|
|
|
|
|
|
/*
 * Return the section number of the first present section.
 * Starting the search at -1 makes next_present_section_nr() scan
 * from section 0 upward.
 */
static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

|
|
|
/*
|
|
|
* Record how many memory sections are marked as present
|
|
|
* during system bootup.
|
|
|
@@ -668,6 +673,86 @@ void __init sparse_init(void)
|
|
|
memblock_free_early(__pa(usemap_map), size);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
|
|
|
+ * And number of present sections in this node is map_count.
|
|
|
+ */
|
|
|
+static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
|
|
|
+ unsigned long pnum_end,
|
|
|
+ unsigned long map_count)
|
|
|
+{
|
|
|
+ unsigned long pnum, usemap_longs, *usemap;
|
|
|
+ struct page *map;
|
|
|
+
|
|
|
+ usemap_longs = BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS);
|
|
|
+ usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
|
|
|
+ usemap_size() *
|
|
|
+ map_count);
|
|
|
+ if (!usemap) {
|
|
|
+ pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
|
|
|
+ goto failed;
|
|
|
+ }
|
|
|
+ sparse_buffer_init(map_count * section_map_size(), nid);
|
|
|
+ for_each_present_section_nr(pnum_begin, pnum) {
|
|
|
+ if (pnum >= pnum_end)
|
|
|
+ break;
|
|
|
+
|
|
|
+ map = sparse_mem_map_populate(pnum, nid, NULL);
|
|
|
+ if (!map) {
|
|
|
+ pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
|
|
|
+ __func__, nid);
|
|
|
+ pnum_begin = pnum;
|
|
|
+ goto failed;
|
|
|
+ }
|
|
|
+ check_usemap_section_nr(nid, usemap);
|
|
|
+ sparse_init_one_section(__nr_to_section(pnum), pnum, map, usemap);
|
|
|
+ usemap += usemap_longs;
|
|
|
+ }
|
|
|
+ sparse_buffer_fini();
|
|
|
+ return;
|
|
|
+failed:
|
|
|
+ /* We failed to allocate, mark all the following pnums as not present */
|
|
|
+ for_each_present_section_nr(pnum_begin, pnum) {
|
|
|
+ struct mem_section *ms;
|
|
|
+
|
|
|
+ if (pnum >= pnum_end)
|
|
|
+ break;
|
|
|
+ ms = __nr_to_section(pnum);
|
|
|
+ ms->section_mem_map = 0;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Allocate the accumulated non-linear sections, allocate a mem_map
|
|
|
+ * for each and record the physical to section mapping.
|
|
|
+ */
|
|
|
+void __init new_sparse_init(void)
|
|
|
+{
|
|
|
+ unsigned long pnum_begin = first_present_section_nr();
|
|
|
+ int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
|
|
|
+ unsigned long pnum_end, map_count = 1;
|
|
|
+
|
|
|
+ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
|
|
|
+ set_pageblock_order();
|
|
|
+
|
|
|
+ for_each_present_section_nr(pnum_begin + 1, pnum_end) {
|
|
|
+ int nid = sparse_early_nid(__nr_to_section(pnum_end));
|
|
|
+
|
|
|
+ if (nid == nid_begin) {
|
|
|
+ map_count++;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ /* Init node with sections in range [pnum_begin, pnum_end) */
|
|
|
+ sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
|
|
|
+ nid_begin = nid;
|
|
|
+ pnum_begin = pnum_end;
|
|
|
+ map_count = 1;
|
|
|
+ }
|
|
|
+ /* cover the last node */
|
|
|
+ sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
|
|
|
+ vmemmap_populate_print_last();
|
|
|
+}
|
|
|
+
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
|
|
|
|
/* Mark all memory sections within the pfn range as online */
|