@@ -52,32 +52,17 @@ static void generic_online_page(struct page *page);
 static online_page_callback_t online_page_callback = generic_online_page;
 static DEFINE_MUTEX(online_page_callback_lock);
 
-/* The same as the cpu_hotplug lock, but for memory hotplug. */
-static struct {
-        struct task_struct *active_writer;
-        struct mutex lock; /* Synchronizes accesses to refcount, */
-        /*
-         * Also blocks the new readers during
-         * an ongoing mem hotplug operation.
-         */
-        int refcount;
+DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        struct lockdep_map dep_map;
-#endif
-} mem_hotplug = {
-        .active_writer = NULL,
-        .lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
-        .refcount = 0,
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        .dep_map = {.name = "mem_hotplug.lock" },
-#endif
-};
+void get_online_mems(void)
+{
+        percpu_down_read(&mem_hotplug_lock);
+}
 
-/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
-#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
-#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
-#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
+void put_online_mems(void)
+{
+        percpu_up_read(&mem_hotplug_lock);
+}
 
 bool movable_node_enabled = false;
 
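Note (not part of the patch): after this hunk the reader side is a plain percpu_rwsem read section, so get_online_mems()/put_online_mems() may sleep and simply bracket the region that must not race with memory hot(un)plug. A minimal, hypothetical caller, assuming only the two helpers defined above:

/*
 * Illustration only, not code from this patch. The function name is
 * hypothetical; the helpers map straight onto percpu_down_read() and
 * percpu_up_read() of mem_hotplug_lock.
 */
static unsigned long count_present_pages_example(void)
{
        unsigned long nr = 0;
        int nid;

        get_online_mems();
        for_each_online_node(nid)
                nr += node_present_pages(nid);
        put_online_mems();

        return nr;
}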
@@ -99,60 +84,16 @@ static int __init setup_memhp_default_state(char *str)
 }
 __setup("memhp_default_state=", setup_memhp_default_state);
 
-void get_online_mems(void)
-{
-        might_sleep();
-        if (mem_hotplug.active_writer == current)
-                return;
-        memhp_lock_acquire_read();
-        mutex_lock(&mem_hotplug.lock);
-        mem_hotplug.refcount++;
-        mutex_unlock(&mem_hotplug.lock);
-
-}
-
-void put_online_mems(void)
-{
-        if (mem_hotplug.active_writer == current)
-                return;
-        mutex_lock(&mem_hotplug.lock);
-
-        if (WARN_ON(!mem_hotplug.refcount))
-                mem_hotplug.refcount++; /* try to fix things up */
-
-        if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
-                wake_up_process(mem_hotplug.active_writer);
-        mutex_unlock(&mem_hotplug.lock);
-        memhp_lock_release();
-
-}
-
-/* Serializes write accesses to mem_hotplug.active_writer. */
-static DEFINE_MUTEX(memory_add_remove_lock);
-
 void mem_hotplug_begin(void)
 {
-        mutex_lock(&memory_add_remove_lock);
-
-        mem_hotplug.active_writer = current;
-
-        memhp_lock_acquire();
-        for (;;) {
-                mutex_lock(&mem_hotplug.lock);
-                if (likely(!mem_hotplug.refcount))
-                        break;
-                __set_current_state(TASK_UNINTERRUPTIBLE);
-                mutex_unlock(&mem_hotplug.lock);
-                schedule();
-        }
+        cpus_read_lock();
+        percpu_down_write(&mem_hotplug_lock);
 }
 
 void mem_hotplug_done(void)
 {
-        mem_hotplug.active_writer = NULL;
-        mutex_unlock(&mem_hotplug.lock);
-        memhp_lock_release();
-        mutex_unlock(&memory_add_remove_lock);
+        percpu_up_write(&mem_hotplug_lock);
+        cpus_read_unlock();
 }
 
 /* add this memory to iomem resource */
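Note (not part of the patch): the writer side now establishes a fixed ordering. mem_hotplug_begin() pins CPU hotplug with cpus_read_lock() before taking mem_hotplug_lock for writing, and mem_hotplug_done() drops the locks in reverse order. A sketch of the pattern a hot-add/hot-remove path sees, using only the two functions changed above; the example function is hypothetical:

/*
 * Illustration only. The real callers are the add_memory()/remove_memory()
 * paths, which this hunk does not touch. The point is the nesting:
 * CPU hotplug read lock outside, mem_hotplug_lock write lock inside.
 */
static void memory_hotplug_writer_example(void)
{
        mem_hotplug_begin();    /* cpus_read_lock() + percpu_down_write() */

        /* ... add, online, offline or remove memory here ... */

        mem_hotplug_done();     /* percpu_up_write() + cpus_read_unlock() */
}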
@@ -1725,7 +1666,7 @@ repeat:
                        goto failed_removal;
                ret = 0;
                if (drain) {
-                       lru_add_drain_all();
+                       lru_add_drain_all_cpuslocked();
                        cond_resched();
                        drain_all_pages(zone);
                }
@@ -1746,7 +1687,7 @@ repeat:
                }
        }
        /* drain all zone's lru pagevec, this is asynchronous... */
-       lru_add_drain_all();
+       lru_add_drain_all_cpuslocked();
        yield();
        /* drain pcp pages, this is synchronous. */
        drain_all_pages(zone);
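Note (not part of the patch): the switch to lru_add_drain_all_cpuslocked() in the offline path follows from the new ordering. mem_hotplug_begin() already holds the CPU hotplug lock, so the drain must not try to take it again. Assuming the helper follows the usual _cpuslocked convention (it is presumably provided by a companion change in mm/swap.c; the body below sketches that convention and is not a quote of the file):

/*
 * Sketch, assuming the conventional split: the _cpuslocked variant
 * requires the caller to already hold the CPU hotplug lock, while the
 * plain variant wraps it for callers that do not.
 */
void lru_add_drain_all(void)
{
        cpus_read_lock();
        lru_add_drain_all_cpuslocked();
        cpus_read_unlock();
}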