Browse Source

[PATCH] x86_64: add __meminit for memory hotplug

Add __meminit to the __init lineup to ensure functions default
to __init when memory hotplug is not enabled.  Replace __devinit
with __meminit on functions that were changed when the memory
hotplug code was introduced.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Matt Tolentino 20 years ago
parent
commit
c09b42404d
3 changed files with 20 additions and 8 deletions
  1. 1 1
      arch/i386/mm/init.c
  2. 12 0
      include/linux/init.h
  3. 7 7
      mm/page_alloc.c

+ 1 - 1
arch/i386/mm/init.c

@@ -268,7 +268,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;	
 }
 
-static void __devinit free_new_highpage(struct page *page)
+static void __meminit free_new_highpage(struct page *page)
 {
 	set_page_count(page, 1);
 	__free_page(page);

+ 12 - 0
include/linux/init.h

@@ -241,6 +241,18 @@ void __init parse_early_param(void);
 #define __cpuexitdata	__exitdata
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __memexit
+#define __memexitdata
+#else
+#define __meminit	__init
+#define __meminitdata __initdata
+#define __memexit __exit
+#define __memexitdata	__exitdata
+#endif
+
 /* Functions marked as __devexit may be discarded at kernel link time, depending
    on config options.  Newer versions of binutils detect references from
    retained sections to discarded sections and flag an error.  Pointers to

+ 7 - 7
mm/page_alloc.c

@@ -1735,7 +1735,7 @@ static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
  */
-void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn)
 {
 	struct page *page;
@@ -1788,7 +1788,7 @@ void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
 #endif
 
-static int __devinit zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
 {
 	int batch;
 
@@ -1882,7 +1882,7 @@ static struct per_cpu_pageset
  * Dynamically allocate memory for the
  * per cpu pageset array in struct zone.
  */
-static int __devinit process_zones(int cpu)
+static int __meminit process_zones(int cpu)
 {
 	struct zone *zone, *dzone;
 
@@ -1923,7 +1923,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
@@ -1963,7 +1963,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __devinit
+static __meminit
 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
@@ -1983,7 +1983,7 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 		init_waitqueue_head(zone->wait_table + i);
 }
 
-static __devinit void zone_pcp_init(struct zone *zone)
+static __meminit void zone_pcp_init(struct zone *zone)
 {
 	int cpu;
 	unsigned long batch = zone_batchsize(zone);
@@ -2001,7 +2001,7 @@ static __devinit void zone_pcp_init(struct zone *zone)
 		zone->name, zone->present_pages, batch);
 }
 
-static __devinit void init_currently_empty_zone(struct zone *zone,
+static __meminit void init_currently_empty_zone(struct zone *zone,
 		unsigned long zone_start_pfn, unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;