cpu.c

/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;

void lock_cpu_hotplug(void)
{
	struct task_struct *tsk = current;

	if (tsk == recursive) {
		static int warnings = 10;
		if (warnings) {
			printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
			WARN_ON(1);
			warnings--;
		}
		recursive_depth++;
		return;
	}
	mutex_lock(&cpu_bitmask_lock);
	recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
	WARN_ON(recursive != current);
	if (recursive_depth) {
		recursive_depth--;
		return;
	}
	recursive = NULL;
	mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

#endif	/* CONFIG_HOTPLUG_CPU */
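
#if 0
/*
 * Illustrative sketch only, not part of the original file: a typical
 * caller holds the hotplug lock across any walk of cpu_online_map so
 * CPUs cannot come or go mid-iteration.  The function name
 * example_walk_online_cpus is hypothetical.
 */
static void example_walk_online_cpus(void)
{
	int cpu;

	lock_cpu_hotplug();
	for_each_online_cpu(cpu)
		printk(KERN_DEBUG "example: CPU %d is online\n", cpu);
	unlock_cpu_hotplug();
}
#endif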

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	mutex_lock(&cpu_add_remove_lock);
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	mutex_lock(&cpu_add_remove_lock);
	raw_notifier_chain_unregister(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
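
#if 0
/*
 * Illustrative sketch only, not part of the original file: a minimal
 * notifier block of the kind passed to register_cpu_notifier().  The
 * names example_cpu_callback and example_cpu_nb are hypothetical; the
 * CPU number travels in hcpu, as in the notifier calls below.
 */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "example: CPU%u came online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "example: CPU%u went offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};
/* Registered with: register_cpu_notifier(&example_cpu_nb); */
#endif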

/* Warn about any task left assigned to the dead CPU that has actually
 * consumed CPU time. */
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
				(state = %ld, flags = %x) \n",
				 p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down (runs on the dying CPU itself, with the machine
 * stopped by __stop_machine_run()). */
static int take_cpu_down(void *unused)
{
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held.
 *
 * Teardown order: CPU_DOWN_PREPARE notifiers, take_cpu_down() under
 * stop_machine, wait for the dying CPU to reach idle, __cpu_die(),
 * then the CPU_DEAD notifiers once the CPU is gone.
 */
static int _cpu_down(unsigned int cpu)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
					  nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	mutex_lock(&cpu_bitmask_lock);
	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	mutex_unlock(&cpu_bitmask_lock);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move it here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out_release:
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE,
				(void *)(long)cpu);
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_down(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
#endif	/* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		printk("%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	mutex_lock(&cpu_bitmask_lock);
	ret = __cpu_up(cpu);
	mutex_unlock(&cpu_bitmask_lock);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED, hcpu, nr_calls, NULL);
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_up(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
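
#if 0
/*
 * Illustrative sketch only, not part of the original file: cpu_up()
 * and cpu_down() are the entry points behind the sysfs "online"
 * attribute (writes to /sys/devices/system/cpu/cpuN/online end up
 * here).  The helper example_set_cpu_online is hypothetical, and
 * cpu_down() exists only when CONFIG_HOTPLUG_CPU is set.
 */
static int example_set_cpu_online(unsigned int cpu, int online)
{
	return online ? cpu_up(cpu) : cpu_down(cpu);
}
#endif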

#ifdef CONFIG_SUSPEND_SMP
/* Needed to prevent the microcode driver from requesting firmware in its CPU
 * hotplug notifier during the suspend/resume.
 */
int suspend_cpu_hotplug;
EXPORT_SYMBOL(suspend_cpu_hotplug);

static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	mutex_lock(&cpu_add_remove_lock);
	suspend_cpu_hotplug = 1;
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	suspend_cpu_hotplug = 0;
	mutex_unlock(&cpu_add_remove_lock);
	return error;
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	mutex_lock(&cpu_add_remove_lock);
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	suspend_cpu_hotplug = 1;
	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = _cpu_up(cpu);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
	suspend_cpu_hotplug = 0;
out:
	mutex_unlock(&cpu_add_remove_lock);
}
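
#if 0
/*
 * Illustrative sketch only, not part of the original file: the suspend
 * core brackets the actual sleep step with these two calls, roughly as
 * below.  The function name example_suspend_machine is hypothetical
 * and the real error handling is more involved.
 */
static int example_suspend_machine(void)
{
	int error;

	error = disable_nonboot_cpus();	/* park all but the boot CPU */
	if (!error) {
		/* ... enter the sleep state on the remaining CPU ... */
		enable_nonboot_cpus();	/* bring the parked CPUs back up */
	}
	return error;
}
#endif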
#endif	/* CONFIG_SUSPEND_SMP */