@@ -465,46 +465,65 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 	return true;
 }
 
+/*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unswappable.
+ */
 static void __init numa_clear_kernel_node_hotplug(void)
 {
-	int i, nid;
-	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-	phys_addr_t start, end;
-	struct memblock_region *r;
+	nodemask_t reserved_nodemask = NODE_MASK_NONE;
+	struct memblock_region *mb_region;
+	int i;
 
 	/*
+	 * We have to do some preprocessing of memblock regions, to
+	 * make them suitable for reservation.
+	 *
 	 * At this time, all memory regions reserved by memblock are
-	 * used by the kernel. Set the nid in memblock.reserved will
-	 * mark out all the nodes the kernel resides in.
+	 * used by the kernel, but those regions are not split up
+	 * along node boundaries yet, and don't necessarily have their
+	 * node ID set yet either.
+	 *
+	 * So iterate over all memory known to the x86 architecture,
+	 * and use those ranges to set the nid in memblock.reserved.
+	 * This will split up the memblock regions along node
+	 * boundaries and will set the node IDs as well.
 	 */
 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		struct numa_memblk *mb = &numa_meminfo.blk[i];
+		struct numa_memblk *mb = numa_meminfo.blk + i;
 
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
+		memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
 	}
 
 	/*
-	 * Mark all kernel nodes.
+	 * Now go over all reserved memblock regions, to construct a
+	 * node mask of all kernel reserved memory areas.
 	 *
-	 * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
-	 * may not include all the memblock.reserved memory ranges because
-	 * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
+	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
+	 *   numa_meminfo might not include all memblock.reserved
+	 *   memory ranges, because quirks such as trim_snb_memory()
+	 *   reserve specific pages for Sandy Bridge graphics. ]
 	 */
-	for_each_memblock(reserved, r)
-		if (r->nid != MAX_NUMNODES)
-			node_set(r->nid, numa_kernel_nodes);
+	for_each_memblock(reserved, mb_region) {
+		if (mb_region->nid != MAX_NUMNODES)
+			node_set(mb_region->nid, reserved_nodemask);
+	}
 
-	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+	/*
+	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
+	 * belonging to the reserved node mask.
+	 *
+	 * Note that this will include memory regions that reside
+	 * on nodes that contain kernel memory - entire nodes
+	 * become hot-unpluggable:
+	 */
 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		nid = numa_meminfo.blk[i].nid;
-		if (!node_isset(nid, numa_kernel_nodes))
-			continue;
-
-		start = numa_meminfo.blk[i].start;
-		end = numa_meminfo.blk[i].end;
-
-		memblock_clear_hotplug(start, end - start);
+		struct numa_memblk *mb = numa_meminfo.blk + i;
+
+		if (!node_isset(mb->nid, reserved_nodemask))
+			continue;
+
+		memblock_clear_hotplug(mb->start, mb->end - mb->start);
 	}
 }
 
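
A note on the first step above: memblock_set_node() clamps the given range against the existing memblock.reserved regions, so a reserved region that straddles a node boundary ends up split into per-node pieces, each carrying its node ID. The following is a minimal user-space sketch of just that splitting step - the two-node layout, the 'struct range' type and all the boundary values are invented for illustration, this is not kernel code:

#include <stdio.h>

struct range { unsigned long start, end; int nid; };

int main(void)
{
	/* Two hypothetical NUMA nodes: [0, 0x1000) on node 0, [0x1000, 0x2000) on node 1. */
	struct range nodes[] = {
		{ 0x0000, 0x1000, 0 },
		{ 0x1000, 0x2000, 1 },
	};
	/* One reserved range straddling the node boundary, node ID not yet set (-1). */
	struct range reserved = { 0x0c00, 0x1400, -1 };
	unsigned int i;

	/*
	 * Clamp the reserved range against each node range: every
	 * non-empty intersection becomes a per-node piece with its
	 * node ID set - the "split up along node boundaries" step.
	 */
	for (i = 0; i < sizeof(nodes)/sizeof(nodes[0]); i++) {
		unsigned long s = reserved.start > nodes[i].start ? reserved.start : nodes[i].start;
		unsigned long e = reserved.end   < nodes[i].end   ? reserved.end   : nodes[i].end;

		if (s < e)
			printf("reserved piece [%#lx, %#lx) -> node %d\n", s, e, nodes[i].nid);
	}
	return 0;
}

Running this prints one piece on node 0 ([0xc00, 0x1000)) and one on node 1 ([0x1000, 0x1400)), which is exactly the shape the first loop in the patch relies on.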
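
The remaining two steps reduce to simple set logic: collect the node IDs of all kernel-reserved regions into a mask, then drop the hotplug flag from every memory range whose node is in that mask, so whole nodes holding kernel memory become hot-unpluggable. Below is a hedged user-space model of those two phases; 'struct blk', HOTPLUG and NID_NONE are illustrative stand-ins for struct numa_memblk, MEMBLOCK_HOTPLUG and MAX_NUMNODES, and the plain unsigned long bitmask stands in for nodemask_t:

#include <stdio.h>

#define HOTPLUG  0x1	/* stand-in for MEMBLOCK_HOTPLUG */
#define NID_NONE (-1)	/* stand-in for MAX_NUMNODES, i.e. "no node ID set" */

struct blk { int nid; unsigned int flags; };

int main(void)
{
	/* Kernel-reserved regions: two on node 0, one without a node ID. */
	struct blk reserved[] = { { 0, 0 }, { 0, 0 }, { NID_NONE, 0 } };
	/* All known memory, initially all marked hotpluggable. */
	struct blk memory[]   = { { 0, HOTPLUG }, { 1, HOTPLUG }, { 2, HOTPLUG } };
	unsigned long nodemask = 0;
	unsigned int i;

	/*
	 * Phase 2: any node holding a reserved (kernel) region goes
	 * into the mask; regions without a node ID are skipped.
	 */
	for (i = 0; i < sizeof(reserved)/sizeof(reserved[0]); i++)
		if (reserved[i].nid != NID_NONE)
			nodemask |= 1UL << reserved[i].nid;

	/*
	 * Phase 3: clear the hotplug flag for *all* memory on those
	 * nodes - the entire node becomes hot-unpluggable, not just
	 * the reserved ranges themselves.
	 */
	for (i = 0; i < sizeof(memory)/sizeof(memory[0]); i++)
		if (nodemask & (1UL << memory[i].nid))
			memory[i].flags &= ~HOTPLUG;

	for (i = 0; i < sizeof(memory)/sizeof(memory[0]); i++)
		printf("memory blk %u (node %d): hotplug=%u\n",
		       i, memory[i].nid, memory[i].flags & HOTPLUG);
	return 0;
}

Only the node-0 block loses its hotplug flag here; nodes 1 and 2 hold no kernel memory and stay hot-pluggable, mirroring why the patch computes a per-node mask rather than clearing the flag on the reserved ranges directly.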