cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
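
/*
 * Usage sketch (not part of this file): a caller that must walk the set
 * of online CPUs without racing against hotplug brackets the walk with
 * get_online_cpus()/put_online_cpus(). do_per_cpu_work() is a
 * hypothetical placeholder:
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);	/- hypothetical per-cpu work -/
 *	put_online_cpus();
 */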
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
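
/*
 * Writer-side pattern, as a sketch (this is how _cpu_down()/_cpu_up()
 * below use these helpers; it is not an additional API):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... perform the hotplug operation ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */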
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
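
/*
 * Usage sketch (mirroring cpu_hotplug_pm_callback() below): code that
 * must keep the set of online CPUs stable across a long operation, and
 * wants cpu_up()/cpu_down() to fail with -EBUSY in the meantime, can do:
 *
 *	cpu_hotplug_disable();
 *	... operation that must not race with hotplug ...
 *	cpu_hotplug_enable();
 */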
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
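
/*
 * Registration sketch (not part of this file): a subsystem that wants
 * CPU up/down events implements a notifier callback and registers it.
 * my_cpu_callback and my_cpu_notifier are hypothetical names:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... set up per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *			... tear down per-cpu state for cpu ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 *
 * The __register_cpu_notifier() variant takes no locks itself and must
 * be bracketed by cpu_notifier_register_begin/done(), as described at
 * the top of this file.
 */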
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
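
/*
 * Caller sketch (an assumption, for illustration only; the exact call
 * site varies by architecture): ports typically invoke this from their
 * __cpu_disable() path, once the dying CPU has left cpu_online_mask:
 *
 *	set_cpu_online(cpu, false);
 *	...
 *	clear_tasks_mm_cpumask(cpu);
 */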
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
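
/*
 * Usage sketch (essentially what the sysfs "online" attribute path in
 * drivers/base/cpu.c ends up doing on a "0" write; the error handling
 * here is illustrative):
 *
 *	int err = cpu_down(3);
 *
 *	if (err)
 *		pr_err("failed to offline CPU3: %d\n", err);
 */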
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
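
/*
 * Usage sketch, the bring-up counterpart of the cpu_down() example
 * above (a "1" write to the sysfs "online" attribute lands here):
 *
 *	int err = cpu_up(3);
 *
 *	if (err)
 *		pr_err("failed to online CPU3: %d\n", err);
 */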
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback runs at higher priority than the x86
	 * bsp_pm_callback, which relies on cpu_hotplug_pm_callback having
	 * disabled cpu hotplug, to avoid hotplug races during suspend.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
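
/*
 * Arch call-site sketch (an illustration, not taken from a specific
 * port; secondary_start_kernel is a hypothetical entry point here):
 * the secondary startup path calls this before marking itself online
 * and enabling interrupts:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		...
 *	}
 */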
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS single-bit bitmap values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
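
/*
 * How the table is consumed, as a sketch (the real lookup lives in
 * get_cpu_mask() in include/linux/cpumask.h): row 1 + cpu % BITS_PER_LONG
 * has the right bit set in its first word, and stepping the pointer back
 * by cpu / BITS_PER_LONG words places that bit at the right word offset.
 * Backing up is always safe because row 0 is kept empty:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */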
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
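
/*
 * Boot-time usage sketch (an illustration of the intended call pattern,
 * not code from a specific architecture; smp_setup and ncores are
 * hypothetical): early SMP init discovers CPUs and seeds these masks
 * before any secondary CPU is brought online:
 *
 *	void __init smp_setup(void)
 *	{
 *		unsigned int cpu;
 *
 *		for (cpu = 0; cpu < ncores; cpu++) {
 *			set_cpu_possible(cpu, true);
 *			set_cpu_present(cpu, true);
 *		}
 *	}
 */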