cpu.c
  1. /* CPU control.
  2. * (C) 2001, 2002, 2003, 2004 Rusty Russell
  3. *
  4. * This code is licensed under the GPL.
  5. */
  6. #include <linux/proc_fs.h>
  7. #include <linux/smp.h>
  8. #include <linux/init.h>
  9. #include <linux/notifier.h>
  10. #include <linux/sched/signal.h>
  11. #include <linux/sched/hotplug.h>
  12. #include <linux/sched/task.h>
  13. #include <linux/unistd.h>
  14. #include <linux/cpu.h>
  15. #include <linux/oom.h>
  16. #include <linux/rcupdate.h>
  17. #include <linux/export.h>
  18. #include <linux/bug.h>
  19. #include <linux/kthread.h>
  20. #include <linux/stop_machine.h>
  21. #include <linux/mutex.h>
  22. #include <linux/gfp.h>
  23. #include <linux/suspend.h>
  24. #include <linux/lockdep.h>
  25. #include <linux/tick.h>
  26. #include <linux/irq.h>
  27. #include <linux/smpboot.h>
  28. #include <linux/relay.h>
  29. #include <linux/slab.h>
  30. #include <linux/percpu-rwsem.h>
  31. #include <trace/events/power.h>
  32. #define CREATE_TRACE_POINTS
  33. #include <trace/events/cpuhp.h>
  34. #include "smpboot.h"
  35. /**
  36. * cpuhp_cpu_state - Per cpu hotplug state storage
  37. * @state: The current cpu state
  38. * @target: The target state
  39. * @thread: Pointer to the hotplug thread
  40. * @should_run: Thread should execute
  41. * @rollback: Perform a rollback
  42. * @single: Single callback invocation
  43. * @bringup: Single callback bringup or teardown selector
  44. * @cb_state: The state for a single callback (install/uninstall)
  45. * @result: Result of the operation
  46. * @done_up: Signal completion to the issuer of the task for cpu-up
  47. * @done_down: Signal completion to the issuer of the task for cpu-down
  48. */
  49. struct cpuhp_cpu_state {
  50. enum cpuhp_state state;
  51. enum cpuhp_state target;
  52. enum cpuhp_state fail;
  53. #ifdef CONFIG_SMP
  54. struct task_struct *thread;
  55. bool should_run;
  56. bool rollback;
  57. bool single;
  58. bool bringup;
  59. struct hlist_node *node;
  60. struct hlist_node *last;
  61. enum cpuhp_state cb_state;
  62. int result;
  63. struct completion done_up;
  64. struct completion done_down;
  65. #endif
  66. };
  67. static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  68. .fail = CPUHP_INVALID,
  69. };
  70. #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  71. static struct lockdep_map cpuhp_state_up_map =
  72. STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  73. static struct lockdep_map cpuhp_state_down_map =
  74. STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  75. static inline void cpuhp_lock_acquire(bool bringup)
  76. {
  77. lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  78. }
  79. static inline void cpuhp_lock_release(bool bringup)
  80. {
  81. lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  82. }
  83. #else
  84. static inline void cpuhp_lock_acquire(bool bringup) { }
  85. static inline void cpuhp_lock_release(bool bringup) { }
  86. #endif
  87. /**
  88. * cpuhp_step - Hotplug state machine step
  89. * @name: Name of the step
  90. * @startup: Startup function of the step
  91. * @teardown: Teardown function of the step
  92. * @skip_onerr: Do not invoke the functions on error rollback
  93. * Will go away once the notifiers are gone
  94. * @cant_stop: Bringup/teardown can't be stopped at this step
  95. */
  96. struct cpuhp_step {
  97. const char *name;
  98. union {
  99. int (*single)(unsigned int cpu);
  100. int (*multi)(unsigned int cpu,
  101. struct hlist_node *node);
  102. } startup;
  103. union {
  104. int (*single)(unsigned int cpu);
  105. int (*multi)(unsigned int cpu,
  106. struct hlist_node *node);
  107. } teardown;
  108. struct hlist_head list;
  109. bool skip_onerr;
  110. bool cant_stop;
  111. bool multi_instance;
  112. };
  113. static DEFINE_MUTEX(cpuhp_state_mutex);
  114. static struct cpuhp_step cpuhp_bp_states[];
  115. static struct cpuhp_step cpuhp_ap_states[];
  116. static bool cpuhp_is_ap_state(enum cpuhp_state state)
  117. {
  118. /*
  119. * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
  120. * purposes as that state is handled explicitly in cpu_down.
  121. */
  122. return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
  123. }
  124. static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
  125. {
  126. struct cpuhp_step *sp;
  127. sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
  128. return sp + state;
  129. }
  130. /**
  131. * cpuhp_invoke_callback - Invoke the callbacks for a given state
  132. * @cpu: The cpu for which the callback should be invoked
  133. * @state: The state to do callbacks for
  134. * @bringup: True if the bringup callback should be invoked
  135. * @node: For multi-instance, do a single entry callback for install/remove
  136. * @lastp: For multi-instance rollback, remember how far we got
  137. *
  138. * Called from cpu hotplug and from the state register machinery.
  139. */
  140. static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
  141. bool bringup, struct hlist_node *node,
  142. struct hlist_node **lastp)
  143. {
  144. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  145. struct cpuhp_step *step = cpuhp_get_step(state);
  146. int (*cbm)(unsigned int cpu, struct hlist_node *node);
  147. int (*cb)(unsigned int cpu);
  148. int ret, cnt;
  149. if (st->fail == state) {
  150. st->fail = CPUHP_INVALID;
  151. if (!(bringup ? step->startup.single : step->teardown.single))
  152. return 0;
  153. return -EAGAIN;
  154. }
  155. if (!step->multi_instance) {
  156. WARN_ON_ONCE(lastp && *lastp);
  157. cb = bringup ? step->startup.single : step->teardown.single;
  158. if (!cb)
  159. return 0;
  160. trace_cpuhp_enter(cpu, st->target, state, cb);
  161. ret = cb(cpu);
  162. trace_cpuhp_exit(cpu, st->state, state, ret);
  163. return ret;
  164. }
  165. cbm = bringup ? step->startup.multi : step->teardown.multi;
  166. if (!cbm)
  167. return 0;
  168. /* Single invocation for instance add/remove */
  169. if (node) {
  170. WARN_ON_ONCE(lastp && *lastp);
  171. trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  172. ret = cbm(cpu, node);
  173. trace_cpuhp_exit(cpu, st->state, state, ret);
  174. return ret;
  175. }
  176. /* State transition. Invoke on all instances */
  177. cnt = 0;
  178. hlist_for_each(node, &step->list) {
  179. if (lastp && node == *lastp)
  180. break;
  181. trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  182. ret = cbm(cpu, node);
  183. trace_cpuhp_exit(cpu, st->state, state, ret);
  184. if (ret) {
  185. if (!lastp)
  186. goto err;
  187. *lastp = node;
  188. return ret;
  189. }
  190. cnt++;
  191. }
  192. if (lastp)
  193. *lastp = NULL;
  194. return 0;
  195. err:
  196. /* Rollback the instances if one failed */
  197. cbm = !bringup ? step->startup.multi : step->teardown.multi;
  198. if (!cbm)
  199. return ret;
  200. hlist_for_each(node, &step->list) {
  201. if (!cnt--)
  202. break;
  203. trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  204. ret = cbm(cpu, node);
  205. trace_cpuhp_exit(cpu, st->state, state, ret);
  206. /*
  207. * Rollback must not fail.
  208. */
  209. WARN_ON_ONCE(ret);
  210. }
  211. return ret;
  212. }
  213. #ifdef CONFIG_SMP
  214. static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
  215. {
  216. struct completion *done = bringup ? &st->done_up : &st->done_down;
  217. wait_for_completion(done);
  218. }
  219. static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
  220. {
  221. struct completion *done = bringup ? &st->done_up : &st->done_down;
  222. complete(done);
  223. }
  224. /*
  225. * The former STARTING/DYING states run with IRQs disabled and must not fail.
  226. */
  227. static bool cpuhp_is_atomic_state(enum cpuhp_state state)
  228. {
  229. return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
  230. }
  231. /* Serializes the updates to cpu_online_mask, cpu_present_mask */
  232. static DEFINE_MUTEX(cpu_add_remove_lock);
  233. bool cpuhp_tasks_frozen;
  234. EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
  235. /*
  236. * The following two APIs (cpu_maps_update_begin/done) must be used when
  237. * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
  238. */
  239. void cpu_maps_update_begin(void)
  240. {
  241. mutex_lock(&cpu_add_remove_lock);
  242. }
  243. void cpu_maps_update_done(void)
  244. {
  245. mutex_unlock(&cpu_add_remove_lock);
  246. }
  247. /*
  248. * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  249. * Should always be manipulated under cpu_add_remove_lock
  250. */
  251. static int cpu_hotplug_disabled;
  252. #ifdef CONFIG_HOTPLUG_CPU
  253. DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
  254. void cpus_read_lock(void)
  255. {
  256. percpu_down_read(&cpu_hotplug_lock);
  257. }
  258. EXPORT_SYMBOL_GPL(cpus_read_lock);
  259. void cpus_read_unlock(void)
  260. {
  261. percpu_up_read(&cpu_hotplug_lock);
  262. }
  263. EXPORT_SYMBOL_GPL(cpus_read_unlock);
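/*
 * Usage sketch (illustrative, not part of this file): a caller that needs the
 * set of online CPUs to stay stable while walking it brackets the walk with
 * the read side of cpu_hotplug_lock. walk_one_cpu() is a made-up name; only
 * cpus_read_lock()/cpus_read_unlock() and for_each_online_cpu() are real.
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		walk_one_cpu(cpu);
 *	cpus_read_unlock();
 *
 * The hotplug core below takes cpus_write_lock() around state transitions,
 * so no CPU can come or go while the read side is held.
 */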
  264. void cpus_write_lock(void)
  265. {
  266. percpu_down_write(&cpu_hotplug_lock);
  267. }
  268. void cpus_write_unlock(void)
  269. {
  270. percpu_up_write(&cpu_hotplug_lock);
  271. }
  272. void lockdep_assert_cpus_held(void)
  273. {
  274. percpu_rwsem_assert_held(&cpu_hotplug_lock);
  275. }
  276. /*
  277. * Wait for currently running CPU hotplug operations to complete (if any) and
  278. * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
  279. * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
  280. * hotplug path before performing hotplug operations. So acquiring that lock
  281. * guarantees mutual exclusion from any currently running hotplug operations.
  282. */
  283. void cpu_hotplug_disable(void)
  284. {
  285. cpu_maps_update_begin();
  286. cpu_hotplug_disabled++;
  287. cpu_maps_update_done();
  288. }
  289. EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
  290. static void __cpu_hotplug_enable(void)
  291. {
  292. if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
  293. return;
  294. cpu_hotplug_disabled--;
  295. }
  296. void cpu_hotplug_enable(void)
  297. {
  298. cpu_maps_update_begin();
  299. __cpu_hotplug_enable();
  300. cpu_maps_update_done();
  301. }
  302. EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
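/*
 * Intended pairing (illustrative sketch): code that must keep hotplug
 * disabled across a longer operation uses the refcounted disable/enable pair.
 *
 *	cpu_hotplug_disable();
 *	... work that must not race with cpu_up()/cpu_down() ...
 *	cpu_hotplug_enable();
 *
 * cpu_hotplug_disable() waits for an already running hotplug operation via
 * cpu_add_remove_lock and then makes subsequent cpu_up()/cpu_down() calls
 * fail with -EBUSY until the matching cpu_hotplug_enable().
 */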
  303. #endif /* CONFIG_HOTPLUG_CPU */
  304. static inline enum cpuhp_state
  305. cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
  306. {
  307. enum cpuhp_state prev_state = st->state;
  308. st->rollback = false;
  309. st->last = NULL;
  310. st->target = target;
  311. st->single = false;
  312. st->bringup = st->state < target;
  313. return prev_state;
  314. }
  315. static inline void
  316. cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
  317. {
  318. st->rollback = true;
  319. /*
  320. * If we have st->last we need to undo partial multi_instance of this
  321. * state first. Otherwise start undo at the previous state.
  322. */
  323. if (!st->last) {
  324. if (st->bringup)
  325. st->state--;
  326. else
  327. st->state++;
  328. }
  329. st->target = prev_state;
  330. st->bringup = !st->bringup;
  331. }
  332. /* Regular hotplug invocation of the AP hotplug thread */
  333. static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
  334. {
  335. if (!st->single && st->state == st->target)
  336. return;
  337. st->result = 0;
  338. /*
  339. * Make sure the above stores are visible before should_run becomes
  340. * true. Paired with the smp_mb() in cpuhp_thread_fun().
  341. */
  342. smp_mb();
  343. st->should_run = true;
  344. wake_up_process(st->thread);
  345. wait_for_ap_thread(st, st->bringup);
  346. }
  347. static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
  348. {
  349. enum cpuhp_state prev_state;
  350. int ret;
  351. prev_state = cpuhp_set_state(st, target);
  352. __cpuhp_kick_ap(st);
  353. if ((ret = st->result)) {
  354. cpuhp_reset_state(st, prev_state);
  355. __cpuhp_kick_ap(st);
  356. }
  357. return ret;
  358. }
  359. static int bringup_wait_for_ap(unsigned int cpu)
  360. {
  361. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  362. /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
  363. wait_for_ap_thread(st, true);
  364. if (WARN_ON_ONCE((!cpu_online(cpu))))
  365. return -ECANCELED;
  366. /* Unpark the stopper thread and the hotplug thread of the target cpu */
  367. stop_machine_unpark(cpu);
  368. kthread_unpark(st->thread);
  369. if (st->target <= CPUHP_AP_ONLINE_IDLE)
  370. return 0;
  371. return cpuhp_kick_ap(st, st->target);
  372. }
  373. static int bringup_cpu(unsigned int cpu)
  374. {
  375. struct task_struct *idle = idle_thread_get(cpu);
  376. int ret;
  377. /*
  378. * Some architectures have to walk the irq descriptors to
  379. * setup the vector space for the cpu which comes online.
  380. * Prevent irq alloc/free across the bringup.
  381. */
  382. irq_lock_sparse();
  383. /* Arch-specific enabling code. */
  384. ret = __cpu_up(cpu, idle);
  385. irq_unlock_sparse();
  386. if (ret)
  387. return ret;
  388. return bringup_wait_for_ap(cpu);
  389. }
  390. /*
  391. * Hotplug state machine related functions
  392. */
  393. static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
  394. {
  395. for (st->state--; st->state > st->target; st->state--) {
  396. struct cpuhp_step *step = cpuhp_get_step(st->state);
  397. if (!step->skip_onerr)
  398. cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
  399. }
  400. }
  401. static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
  402. enum cpuhp_state target)
  403. {
  404. enum cpuhp_state prev_state = st->state;
  405. int ret = 0;
  406. while (st->state < target) {
  407. st->state++;
  408. ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
  409. if (ret) {
  410. st->target = prev_state;
  411. undo_cpu_up(cpu, st);
  412. break;
  413. }
  414. }
  415. return ret;
  416. }
  417. /*
  418. * The cpu hotplug threads manage the bringup and teardown of the cpus
  419. */
  420. static void cpuhp_create(unsigned int cpu)
  421. {
  422. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  423. init_completion(&st->done_up);
  424. init_completion(&st->done_down);
  425. }
  426. static int cpuhp_should_run(unsigned int cpu)
  427. {
  428. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  429. return st->should_run;
  430. }
  431. /*
  432. * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
  433. * callbacks when a state gets [un]installed at runtime.
  434. *
  435. * Each invocation of this function by the smpboot thread does a single AP
  436. * state callback.
  437. *
  438. * It has 3 modes of operation:
  439. * - single: runs st->cb_state
  440. * - up: runs ++st->state, while st->state < st->target
  441. * - down: runs st->state--, while st->state > st->target
  442. *
  443. * When complete or on error, should_run is cleared and the completion is fired.
  444. */
  445. static void cpuhp_thread_fun(unsigned int cpu)
  446. {
  447. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  448. bool bringup = st->bringup;
  449. enum cpuhp_state state;
  450. /*
  451. * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
  452. * that if we see ->should_run we also see the rest of the state.
  453. */
  454. smp_mb();
  455. if (WARN_ON_ONCE(!st->should_run))
  456. return;
  457. cpuhp_lock_acquire(bringup);
  458. if (st->single) {
  459. state = st->cb_state;
  460. st->should_run = false;
  461. } else {
  462. if (bringup) {
  463. st->state++;
  464. state = st->state;
  465. st->should_run = (st->state < st->target);
  466. WARN_ON_ONCE(st->state > st->target);
  467. } else {
  468. state = st->state;
  469. st->state--;
  470. st->should_run = (st->state > st->target);
  471. WARN_ON_ONCE(st->state < st->target);
  472. }
  473. }
  474. WARN_ON_ONCE(!cpuhp_is_ap_state(state));
  475. if (st->rollback) {
  476. struct cpuhp_step *step = cpuhp_get_step(state);
  477. if (step->skip_onerr)
  478. goto next;
  479. }
  480. if (cpuhp_is_atomic_state(state)) {
  481. local_irq_disable();
  482. st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
  483. local_irq_enable();
  484. /*
  485. * STARTING/DYING must not fail!
  486. */
  487. WARN_ON_ONCE(st->result);
  488. } else {
  489. st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
  490. }
  491. if (st->result) {
  492. /*
  493. * If we fail on a rollback, we're up a creek without a
  494. * paddle, no way forward, no way back. We lose, thanks for
  495. * playing.
  496. */
  497. WARN_ON_ONCE(st->rollback);
  498. st->should_run = false;
  499. }
  500. next:
  501. cpuhp_lock_release(bringup);
  502. if (!st->should_run)
  503. complete_ap_thread(st, bringup);
  504. }
  505. /* Invoke a single callback on a remote cpu */
  506. static int
  507. cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
  508. struct hlist_node *node)
  509. {
  510. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  511. int ret;
  512. if (!cpu_online(cpu))
  513. return 0;
  514. cpuhp_lock_acquire(false);
  515. cpuhp_lock_release(false);
  516. cpuhp_lock_acquire(true);
  517. cpuhp_lock_release(true);
  518. /*
  519. * If we are up and running, use the hotplug thread. For early calls
  520. * we invoke the thread function directly.
  521. */
  522. if (!st->thread)
  523. return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
  524. st->rollback = false;
  525. st->last = NULL;
  526. st->node = node;
  527. st->bringup = bringup;
  528. st->cb_state = state;
  529. st->single = true;
  530. __cpuhp_kick_ap(st);
  531. /*
  532. * If we failed and did a partial, do a rollback.
  533. */
  534. if ((ret = st->result) && st->last) {
  535. st->rollback = true;
  536. st->bringup = !bringup;
  537. __cpuhp_kick_ap(st);
  538. }
  539. return ret;
  540. }
  541. static int cpuhp_kick_ap_work(unsigned int cpu)
  542. {
  543. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  544. enum cpuhp_state prev_state = st->state;
  545. int ret;
  546. cpuhp_lock_acquire(false);
  547. cpuhp_lock_release(false);
  548. cpuhp_lock_acquire(true);
  549. cpuhp_lock_release(true);
  550. trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
  551. ret = cpuhp_kick_ap(st, st->target);
  552. trace_cpuhp_exit(cpu, st->state, prev_state, ret);
  553. return ret;
  554. }
  555. static struct smp_hotplug_thread cpuhp_threads = {
  556. .store = &cpuhp_state.thread,
  557. .create = &cpuhp_create,
  558. .thread_should_run = cpuhp_should_run,
  559. .thread_fn = cpuhp_thread_fun,
  560. .thread_comm = "cpuhp/%u",
  561. .selfparking = true,
  562. };
  563. void __init cpuhp_threads_init(void)
  564. {
  565. BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
  566. kthread_unpark(this_cpu_read(cpuhp_state.thread));
  567. }
  568. #ifdef CONFIG_HOTPLUG_CPU
  569. /**
  570. * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  571. * @cpu: a CPU id
  572. *
  573. * This function walks all processes, finds a valid mm struct for each one and
  574. * then clears a corresponding bit in mm's cpumask. While this all sounds
  575. * trivial, there are various non-obvious corner cases, which this function
  576. * tries to solve in a safe manner.
  577. *
  578. * Also note that the function uses a somewhat relaxed locking scheme, so it may
  579. * be called only for an already offlined CPU.
  580. */
  581. void clear_tasks_mm_cpumask(int cpu)
  582. {
  583. struct task_struct *p;
  584. /*
  585. * This function is called after the cpu is taken down and marked
  586. * offline, so it's not like new tasks will ever get this cpu set in
  587. * their mm mask. -- Peter Zijlstra
  588. * Thus, we may use rcu_read_lock() here, instead of grabbing
  589. * full-fledged tasklist_lock.
  590. */
  591. WARN_ON(cpu_online(cpu));
  592. rcu_read_lock();
  593. for_each_process(p) {
  594. struct task_struct *t;
  595. /*
  596. * Main thread might exit, but other threads may still have
  597. * a valid mm. Find one.
  598. */
  599. t = find_lock_task_mm(p);
  600. if (!t)
  601. continue;
  602. cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
  603. task_unlock(t);
  604. }
  605. rcu_read_unlock();
  606. }
  607. /* Take this CPU down. */
  608. static int take_cpu_down(void *_param)
  609. {
  610. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  611. enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
  612. int err, cpu = smp_processor_id();
  613. int ret;
  614. /* Ensure this CPU doesn't handle any more interrupts. */
  615. err = __cpu_disable();
  616. if (err < 0)
  617. return err;
  618. /*
  619. * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
  620. * do this step again.
  621. */
  622. WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
  623. st->state--;
  624. /* Invoke the former CPU_DYING callbacks */
  625. for (; st->state > target; st->state--) {
  626. ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
  627. /*
  628. * DYING must not fail!
  629. */
  630. WARN_ON_ONCE(ret);
  631. }
  632. /* Give up timekeeping duties */
  633. tick_handover_do_timer();
  634. /* Park the stopper thread */
  635. stop_machine_park(cpu);
  636. return 0;
  637. }
  638. static int takedown_cpu(unsigned int cpu)
  639. {
  640. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  641. int err;
  642. /* Park the smpboot threads */
  643. kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  644. smpboot_park_threads(cpu);
  645. /*
  646. * Prevent irq alloc/free while the dying cpu reorganizes the
  647. * interrupt affinities.
  648. */
  649. irq_lock_sparse();
  650. /*
  651. * So now all preempt/rcu users must observe !cpu_active().
  652. */
  653. err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
  654. if (err) {
  655. /* CPU refused to die */
  656. irq_unlock_sparse();
  657. /* Unpark the hotplug thread so we can rollback there */
  658. kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  659. return err;
  660. }
  661. BUG_ON(cpu_online(cpu));
  662. /*
  663. * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
  664. * runnable tasks from the cpu, there's only the idle task left now
  665. * that the migration thread is done doing the stop_machine thing.
  666. *
  667. * Wait for the stop thread to go away.
  668. */
  669. wait_for_ap_thread(st, false);
  670. BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
  671. /* Interrupts are moved away from the dying cpu, reenable alloc/free */
  672. irq_unlock_sparse();
  673. hotplug_cpu__broadcast_tick_pull(cpu);
  674. /* This actually kills the CPU. */
  675. __cpu_die(cpu);
  676. tick_cleanup_dead_cpu(cpu);
  677. rcutree_migrate_callbacks(cpu);
  678. return 0;
  679. }
  680. static void cpuhp_complete_idle_dead(void *arg)
  681. {
  682. struct cpuhp_cpu_state *st = arg;
  683. complete_ap_thread(st, false);
  684. }
  685. void cpuhp_report_idle_dead(void)
  686. {
  687. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  688. BUG_ON(st->state != CPUHP_AP_OFFLINE);
  689. rcu_report_dead(smp_processor_id());
  690. st->state = CPUHP_AP_IDLE_DEAD;
  691. /*
  692. * We cannot call complete after rcu_report_dead() so we delegate it
  693. * to an online cpu.
  694. */
  695. smp_call_function_single(cpumask_first(cpu_online_mask),
  696. cpuhp_complete_idle_dead, st, 0);
  697. }
  698. static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
  699. {
  700. for (st->state++; st->state < st->target; st->state++) {
  701. struct cpuhp_step *step = cpuhp_get_step(st->state);
  702. if (!step->skip_onerr)
  703. cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
  704. }
  705. }
  706. static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
  707. enum cpuhp_state target)
  708. {
  709. enum cpuhp_state prev_state = st->state;
  710. int ret = 0;
  711. for (; st->state > target; st->state--) {
  712. ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
  713. if (ret) {
  714. st->target = prev_state;
  715. undo_cpu_down(cpu, st);
  716. break;
  717. }
  718. }
  719. return ret;
  720. }
  721. /* Requires cpu_add_remove_lock to be held */
  722. static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
  723. enum cpuhp_state target)
  724. {
  725. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  726. int prev_state, ret = 0;
  727. if (num_online_cpus() == 1)
  728. return -EBUSY;
  729. if (!cpu_present(cpu))
  730. return -EINVAL;
  731. cpus_write_lock();
  732. cpuhp_tasks_frozen = tasks_frozen;
  733. prev_state = cpuhp_set_state(st, target);
  734. /*
  735. * If the current CPU state is in the range of the AP hotplug thread,
  736. * then we need to kick the thread.
  737. */
  738. if (st->state > CPUHP_TEARDOWN_CPU) {
  739. st->target = max((int)target, CPUHP_TEARDOWN_CPU);
  740. ret = cpuhp_kick_ap_work(cpu);
  741. /*
  742. * The AP side has done the error rollback already. Just
  743. * return the error code.
  744. */
  745. if (ret)
  746. goto out;
  747. /*
  748. * We might have stopped still in the range of the AP hotplug
  749. * thread. Nothing to do anymore.
  750. */
  751. if (st->state > CPUHP_TEARDOWN_CPU)
  752. goto out;
  753. st->target = target;
  754. }
  755. /*
  756. * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
  757. * to do the further cleanups.
  758. */
  759. ret = cpuhp_down_callbacks(cpu, st, target);
  760. if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
  761. cpuhp_reset_state(st, prev_state);
  762. __cpuhp_kick_ap(st);
  763. }
  764. out:
  765. cpus_write_unlock();
  766. return ret;
  767. }
  768. static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
  769. {
  770. int err;
  771. cpu_maps_update_begin();
  772. if (cpu_hotplug_disabled) {
  773. err = -EBUSY;
  774. goto out;
  775. }
  776. err = _cpu_down(cpu, 0, target);
  777. out:
  778. cpu_maps_update_done();
  779. return err;
  780. }
  781. int cpu_down(unsigned int cpu)
  782. {
  783. return do_cpu_down(cpu, CPUHP_OFFLINE);
  784. }
  785. EXPORT_SYMBOL(cpu_down);
  786. #else
  787. #define takedown_cpu NULL
  788. #endif /*CONFIG_HOTPLUG_CPU*/
  789. /**
  790. * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  791. * @cpu: cpu that just started
  792. *
  793. * It must be called by the arch code on the new cpu, before the new cpu
  794. * enables interrupts and before the "boot" cpu returns from __cpu_up().
  795. */
  796. void notify_cpu_starting(unsigned int cpu)
  797. {
  798. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  799. enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
  800. int ret;
  801. rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
  802. while (st->state < target) {
  803. st->state++;
  804. ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
  805. /*
  806. * STARTING must not fail!
  807. */
  808. WARN_ON_ONCE(ret);
  809. }
  810. }
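/*
 * Rough call order on the incoming CPU (illustrative; the surrounding
 * function is arch specific and the exact sequence differs per
 * architecture):
 *
 *	void secondary_start_kernel(void)
 *	{
 *		... low level per-cpu setup, interrupts still disabled ...
 *		notify_cpu_starting(cpu);	// former CPU_STARTING callbacks
 *		set_cpu_online(cpu, true);	// seen by bringup_wait_for_ap()
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);	// idle task calls cpuhp_online_idle()
 *	}
 */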
  811. /*
  812. * Called from the idle task. Wake up the controlling task which brings the
  813. * stopper and the hotplug thread of the upcoming CPU up and then delegates
  814. * the rest of the online bringup to the hotplug thread.
  815. */
  816. void cpuhp_online_idle(enum cpuhp_state state)
  817. {
  818. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  819. /* Happens for the boot cpu */
  820. if (state != CPUHP_AP_ONLINE_IDLE)
  821. return;
  822. st->state = CPUHP_AP_ONLINE_IDLE;
  823. complete_ap_thread(st, true);
  824. }
  825. /* Requires cpu_add_remove_lock to be held */
  826. static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
  827. {
  828. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  829. struct task_struct *idle;
  830. int ret = 0;
  831. cpus_write_lock();
  832. if (!cpu_present(cpu)) {
  833. ret = -EINVAL;
  834. goto out;
  835. }
  836. /*
  837. * The caller of do_cpu_up might have raced with another
  838. * caller. Ignore it for now.
  839. */
  840. if (st->state >= target)
  841. goto out;
  842. if (st->state == CPUHP_OFFLINE) {
  843. /* Let it fail before we try to bring the cpu up */
  844. idle = idle_thread_get(cpu);
  845. if (IS_ERR(idle)) {
  846. ret = PTR_ERR(idle);
  847. goto out;
  848. }
  849. }
  850. cpuhp_tasks_frozen = tasks_frozen;
  851. cpuhp_set_state(st, target);
  852. /*
  853. * If the current CPU state is in the range of the AP hotplug thread,
  854. * then we need to kick the thread once more.
  855. */
  856. if (st->state > CPUHP_BRINGUP_CPU) {
  857. ret = cpuhp_kick_ap_work(cpu);
  858. /*
  859. * The AP side has done the error rollback already. Just
  860. * return the error code.
  861. */
  862. if (ret)
  863. goto out;
  864. }
  865. /*
  866. * Try to reach the target state. We max out on the BP at
  867. * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
  868. * responsible for bringing it up to the target state.
  869. */
  870. target = min((int)target, CPUHP_BRINGUP_CPU);
  871. ret = cpuhp_up_callbacks(cpu, st, target);
  872. out:
  873. cpus_write_unlock();
  874. return ret;
  875. }
  876. static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
  877. {
  878. int err = 0;
  879. if (!cpu_possible(cpu)) {
  880. pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
  881. cpu);
  882. #if defined(CONFIG_IA64)
  883. pr_err("please check additional_cpus= boot parameter\n");
  884. #endif
  885. return -EINVAL;
  886. }
  887. err = try_online_node(cpu_to_node(cpu));
  888. if (err)
  889. return err;
  890. cpu_maps_update_begin();
  891. if (cpu_hotplug_disabled) {
  892. err = -EBUSY;
  893. goto out;
  894. }
  895. err = _cpu_up(cpu, 0, target);
  896. out:
  897. cpu_maps_update_done();
  898. return err;
  899. }
  900. int cpu_up(unsigned int cpu)
  901. {
  902. return do_cpu_up(cpu, CPUHP_ONLINE);
  903. }
  904. EXPORT_SYMBOL_GPL(cpu_up);
  905. #ifdef CONFIG_PM_SLEEP_SMP
  906. static cpumask_var_t frozen_cpus;
  907. int freeze_secondary_cpus(int primary)
  908. {
  909. int cpu, error = 0;
  910. cpu_maps_update_begin();
  911. if (!cpu_online(primary))
  912. primary = cpumask_first(cpu_online_mask);
  913. /*
  914. * We take down all of the non-boot CPUs in one shot to avoid races
  915. * with userspace trying to use CPU hotplug at the same time.
  916. */
  917. cpumask_clear(frozen_cpus);
  918. pr_info("Disabling non-boot CPUs ...\n");
  919. for_each_online_cpu(cpu) {
  920. if (cpu == primary)
  921. continue;
  922. trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
  923. error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
  924. trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
  925. if (!error)
  926. cpumask_set_cpu(cpu, frozen_cpus);
  927. else {
  928. pr_err("Error taking CPU%d down: %d\n", cpu, error);
  929. break;
  930. }
  931. }
  932. if (!error)
  933. BUG_ON(num_online_cpus() > 1);
  934. else
  935. pr_err("Non-boot CPUs are not disabled\n");
  936. /*
  937. * Make sure the CPUs won't be enabled by someone else. We need to do
  938. * this even in case of failure as all disable_nonboot_cpus() users are
  939. * supposed to do enable_nonboot_cpus() on the failure path.
  940. */
  941. cpu_hotplug_disabled++;
  942. cpu_maps_update_done();
  943. return error;
  944. }
  945. void __weak arch_enable_nonboot_cpus_begin(void)
  946. {
  947. }
  948. void __weak arch_enable_nonboot_cpus_end(void)
  949. {
  950. }
  951. void enable_nonboot_cpus(void)
  952. {
  953. int cpu, error;
  954. /* Allow everyone to use the CPU hotplug again */
  955. cpu_maps_update_begin();
  956. __cpu_hotplug_enable();
  957. if (cpumask_empty(frozen_cpus))
  958. goto out;
  959. pr_info("Enabling non-boot CPUs ...\n");
  960. arch_enable_nonboot_cpus_begin();
  961. for_each_cpu(cpu, frozen_cpus) {
  962. trace_suspend_resume(TPS("CPU_ON"), cpu, true);
  963. error = _cpu_up(cpu, 1, CPUHP_ONLINE);
  964. trace_suspend_resume(TPS("CPU_ON"), cpu, false);
  965. if (!error) {
  966. pr_info("CPU%d is up\n", cpu);
  967. continue;
  968. }
  969. pr_warn("Error taking CPU%d up: %d\n", cpu, error);
  970. }
  971. arch_enable_nonboot_cpus_end();
  972. cpumask_clear(frozen_cpus);
  973. out:
  974. cpu_maps_update_done();
  975. }
  976. static int __init alloc_frozen_cpus(void)
  977. {
  978. if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
  979. return -ENOMEM;
  980. return 0;
  981. }
  982. core_initcall(alloc_frozen_cpus);
  983. /*
  984. * When callbacks for CPU hotplug notifications are being executed, we must
  985. * ensure that the state of the system with respect to the tasks being frozen
  986. * or not, as reported by the notification, remains unchanged *throughout the
  987. * duration* of the execution of the callbacks.
  988. * Hence we need to prevent the freezer from racing with regular CPU hotplug.
  989. *
  990. * This synchronization is implemented by mutually excluding regular CPU
  991. * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
  992. * Hibernate notifications.
  993. */
  994. static int
  995. cpu_hotplug_pm_callback(struct notifier_block *nb,
  996. unsigned long action, void *ptr)
  997. {
  998. switch (action) {
  999. case PM_SUSPEND_PREPARE:
  1000. case PM_HIBERNATION_PREPARE:
  1001. cpu_hotplug_disable();
  1002. break;
  1003. case PM_POST_SUSPEND:
  1004. case PM_POST_HIBERNATION:
  1005. cpu_hotplug_enable();
  1006. break;
  1007. default:
  1008. return NOTIFY_DONE;
  1009. }
  1010. return NOTIFY_OK;
  1011. }
  1012. static int __init cpu_hotplug_pm_sync_init(void)
  1013. {
  1014. /*
  1015. * cpu_hotplug_pm_callback has higher priority than x86
  1016. * bsp_pm_callback which depends on cpu_hotplug_pm_callback
  1017. * to disable cpu hotplug to avoid cpu hotplug race.
  1018. */
  1019. pm_notifier(cpu_hotplug_pm_callback, 0);
  1020. return 0;
  1021. }
  1022. core_initcall(cpu_hotplug_pm_sync_init);
  1023. #endif /* CONFIG_PM_SLEEP_SMP */
  1024. int __boot_cpu_id;
  1025. #endif /* CONFIG_SMP */
  1026. /* Boot processor state steps */
  1027. static struct cpuhp_step cpuhp_bp_states[] = {
  1028. [CPUHP_OFFLINE] = {
  1029. .name = "offline",
  1030. .startup.single = NULL,
  1031. .teardown.single = NULL,
  1032. },
  1033. #ifdef CONFIG_SMP
  1034. [CPUHP_CREATE_THREADS] = {
  1035. .name = "threads:prepare",
  1036. .startup.single = smpboot_create_threads,
  1037. .teardown.single = NULL,
  1038. .cant_stop = true,
  1039. },
  1040. [CPUHP_PERF_PREPARE] = {
  1041. .name = "perf:prepare",
  1042. .startup.single = perf_event_init_cpu,
  1043. .teardown.single = perf_event_exit_cpu,
  1044. },
  1045. [CPUHP_WORKQUEUE_PREP] = {
  1046. .name = "workqueue:prepare",
  1047. .startup.single = workqueue_prepare_cpu,
  1048. .teardown.single = NULL,
  1049. },
  1050. [CPUHP_HRTIMERS_PREPARE] = {
  1051. .name = "hrtimers:prepare",
  1052. .startup.single = hrtimers_prepare_cpu,
  1053. .teardown.single = hrtimers_dead_cpu,
  1054. },
  1055. [CPUHP_SMPCFD_PREPARE] = {
  1056. .name = "smpcfd:prepare",
  1057. .startup.single = smpcfd_prepare_cpu,
  1058. .teardown.single = smpcfd_dead_cpu,
  1059. },
  1060. [CPUHP_RELAY_PREPARE] = {
  1061. .name = "relay:prepare",
  1062. .startup.single = relay_prepare_cpu,
  1063. .teardown.single = NULL,
  1064. },
  1065. [CPUHP_SLAB_PREPARE] = {
  1066. .name = "slab:prepare",
  1067. .startup.single = slab_prepare_cpu,
  1068. .teardown.single = slab_dead_cpu,
  1069. },
  1070. [CPUHP_RCUTREE_PREP] = {
  1071. .name = "RCU/tree:prepare",
  1072. .startup.single = rcutree_prepare_cpu,
  1073. .teardown.single = rcutree_dead_cpu,
  1074. },
  1075. /*
  1076. * On the tear-down path, timers_dead_cpu() must be invoked
  1077. * before blk_mq_queue_reinit_notify() from notify_dead(),
  1078. * otherwise an RCU stall occurs.
  1079. */
  1080. [CPUHP_TIMERS_DEAD] = {
  1081. .name = "timers:dead",
  1082. .startup.single = NULL,
  1083. .teardown.single = timers_dead_cpu,
  1084. },
  1085. /* Kicks the plugged cpu into life */
  1086. [CPUHP_BRINGUP_CPU] = {
  1087. .name = "cpu:bringup",
  1088. .startup.single = bringup_cpu,
  1089. .teardown.single = NULL,
  1090. .cant_stop = true,
  1091. },
  1092. [CPUHP_AP_SMPCFD_DYING] = {
  1093. .name = "smpcfd:dying",
  1094. .startup.single = NULL,
  1095. .teardown.single = smpcfd_dying_cpu,
  1096. },
  1097. /*
  1098. * Handled on the control processor until the plugged processor manages
  1099. * this itself.
  1100. */
  1101. [CPUHP_TEARDOWN_CPU] = {
  1102. .name = "cpu:teardown",
  1103. .startup.single = NULL,
  1104. .teardown.single = takedown_cpu,
  1105. .cant_stop = true,
  1106. },
  1107. #else
  1108. [CPUHP_BRINGUP_CPU] = { },
  1109. #endif
  1110. };
  1111. /* Application processor state steps */
  1112. static struct cpuhp_step cpuhp_ap_states[] = {
  1113. #ifdef CONFIG_SMP
  1114. /* Final state before CPU kills itself */
  1115. [CPUHP_AP_IDLE_DEAD] = {
  1116. .name = "idle:dead",
  1117. },
  1118. /*
  1119. * Last state before CPU enters the idle loop to die. Transient state
  1120. * for synchronization.
  1121. */
  1122. [CPUHP_AP_OFFLINE] = {
  1123. .name = "ap:offline",
  1124. .cant_stop = true,
  1125. },
  1126. /* First state is scheduler control. Interrupts are disabled */
  1127. [CPUHP_AP_SCHED_STARTING] = {
  1128. .name = "sched:starting",
  1129. .startup.single = sched_cpu_starting,
  1130. .teardown.single = sched_cpu_dying,
  1131. },
  1132. [CPUHP_AP_RCUTREE_DYING] = {
  1133. .name = "RCU/tree:dying",
  1134. .startup.single = NULL,
  1135. .teardown.single = rcutree_dying_cpu,
  1136. },
  1137. /* Entry state on starting. Interrupts enabled from here on. Transient
  1138. * state for synchronization */
  1139. [CPUHP_AP_ONLINE] = {
  1140. .name = "ap:online",
  1141. },
  1142. /* Handle smpboot threads park/unpark */
  1143. [CPUHP_AP_SMPBOOT_THREADS] = {
  1144. .name = "smpboot/threads:online",
  1145. .startup.single = smpboot_unpark_threads,
  1146. .teardown.single = NULL,
  1147. },
  1148. [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
  1149. .name = "irq/affinity:online",
  1150. .startup.single = irq_affinity_online_cpu,
  1151. .teardown.single = NULL,
  1152. },
  1153. [CPUHP_AP_PERF_ONLINE] = {
  1154. .name = "perf:online",
  1155. .startup.single = perf_event_init_cpu,
  1156. .teardown.single = perf_event_exit_cpu,
  1157. },
  1158. [CPUHP_AP_WORKQUEUE_ONLINE] = {
  1159. .name = "workqueue:online",
  1160. .startup.single = workqueue_online_cpu,
  1161. .teardown.single = workqueue_offline_cpu,
  1162. },
  1163. [CPUHP_AP_RCUTREE_ONLINE] = {
  1164. .name = "RCU/tree:online",
  1165. .startup.single = rcutree_online_cpu,
  1166. .teardown.single = rcutree_offline_cpu,
  1167. },
  1168. #endif
  1169. /*
  1170. * The dynamically registered state space is here
  1171. */
  1172. #ifdef CONFIG_SMP
  1173. /* Last state is scheduler control setting the cpu active */
  1174. [CPUHP_AP_ACTIVE] = {
  1175. .name = "sched:active",
  1176. .startup.single = sched_cpu_activate,
  1177. .teardown.single = sched_cpu_deactivate,
  1178. },
  1179. #endif
  1180. /* CPU is fully up and running. */
  1181. [CPUHP_ONLINE] = {
  1182. .name = "online",
  1183. .startup.single = NULL,
  1184. .teardown.single = NULL,
  1185. },
  1186. };
  1187. /* Sanity check for callbacks */
  1188. static int cpuhp_cb_check(enum cpuhp_state state)
  1189. {
  1190. if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
  1191. return -EINVAL;
  1192. return 0;
  1193. }
  1194. /*
  1195. * Returns a free slot for dynamic state assignment (the Prepare or Online
  1196. * dynamic ranges). The states are protected by cpuhp_state_mutex and an empty
  1197. * slot is identified by having no name assigned.
  1198. */
  1199. static int cpuhp_reserve_state(enum cpuhp_state state)
  1200. {
  1201. enum cpuhp_state i, end;
  1202. struct cpuhp_step *step;
  1203. switch (state) {
  1204. case CPUHP_AP_ONLINE_DYN:
  1205. step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
  1206. end = CPUHP_AP_ONLINE_DYN_END;
  1207. break;
  1208. case CPUHP_BP_PREPARE_DYN:
  1209. step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
  1210. end = CPUHP_BP_PREPARE_DYN_END;
  1211. break;
  1212. default:
  1213. return -EINVAL;
  1214. }
  1215. for (i = state; i <= end; i++, step++) {
  1216. if (!step->name)
  1217. return i;
  1218. }
  1219. WARN(1, "No more dynamic states available for CPU hotplug\n");
  1220. return -ENOSPC;
  1221. }
  1222. static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
  1223. int (*startup)(unsigned int cpu),
  1224. int (*teardown)(unsigned int cpu),
  1225. bool multi_instance)
  1226. {
  1227. /* (Un)Install the callbacks for further cpu hotplug operations */
  1228. struct cpuhp_step *sp;
  1229. int ret = 0;
  1230. /*
  1231. * If name is NULL, then the state gets removed.
  1232. *
  1233. * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
  1234. * the first allocation from these dynamic ranges, so the removal
  1235. * would trigger a new allocation and clear the wrong (already
  1236. * empty) state, leaving the callbacks of the to be cleared state
  1237. * dangling, which causes wreckage on the next hotplug operation.
  1238. */
  1239. if (name && (state == CPUHP_AP_ONLINE_DYN ||
  1240. state == CPUHP_BP_PREPARE_DYN)) {
  1241. ret = cpuhp_reserve_state(state);
  1242. if (ret < 0)
  1243. return ret;
  1244. state = ret;
  1245. }
  1246. sp = cpuhp_get_step(state);
  1247. if (name && sp->name)
  1248. return -EBUSY;
  1249. sp->startup.single = startup;
  1250. sp->teardown.single = teardown;
  1251. sp->name = name;
  1252. sp->multi_instance = multi_instance;
  1253. INIT_HLIST_HEAD(&sp->list);
  1254. return ret;
  1255. }
  1256. static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
  1257. {
  1258. return cpuhp_get_step(state)->teardown.single;
  1259. }
  1260. /*
  1261. * Call the startup/teardown function for a step either on the AP or
  1262. * on the current CPU.
  1263. */
  1264. static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
  1265. struct hlist_node *node)
  1266. {
  1267. struct cpuhp_step *sp = cpuhp_get_step(state);
  1268. int ret;
  1269. /*
  1270. * If there's nothing to do, we're done.
  1271. * Relies on the union for multi_instance.
  1272. */
  1273. if ((bringup && !sp->startup.single) ||
  1274. (!bringup && !sp->teardown.single))
  1275. return 0;
  1276. /*
  1277. * The non AP bound callbacks can fail on bringup. On teardown
  1278. * e.g. module removal we crash for now.
  1279. */
  1280. #ifdef CONFIG_SMP
  1281. if (cpuhp_is_ap_state(state))
  1282. ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
  1283. else
  1284. ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
  1285. #else
  1286. ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
  1287. #endif
  1288. BUG_ON(ret && !bringup);
  1289. return ret;
  1290. }
  1291. /*
  1292. * Called from __cpuhp_setup_state on a recoverable failure.
  1293. *
  1294. * Note: The teardown callbacks for rollback are not allowed to fail!
  1295. */
  1296. static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
  1297. struct hlist_node *node)
  1298. {
  1299. int cpu;
  1300. /* Roll back the already executed steps on the other cpus */
  1301. for_each_present_cpu(cpu) {
  1302. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1303. int cpustate = st->state;
  1304. if (cpu >= failedcpu)
  1305. break;
  1306. /* Did we invoke the startup call on that cpu ? */
  1307. if (cpustate >= state)
  1308. cpuhp_issue_call(cpu, state, false, node);
  1309. }
  1310. }
  1311. int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
  1312. struct hlist_node *node,
  1313. bool invoke)
  1314. {
  1315. struct cpuhp_step *sp;
  1316. int cpu;
  1317. int ret;
  1318. lockdep_assert_cpus_held();
  1319. sp = cpuhp_get_step(state);
  1320. if (sp->multi_instance == false)
  1321. return -EINVAL;
  1322. mutex_lock(&cpuhp_state_mutex);
  1323. if (!invoke || !sp->startup.multi)
  1324. goto add_node;
  1325. /*
  1326. * Try to call the startup callback for each present cpu
  1327. * depending on the hotplug state of the cpu.
  1328. */
  1329. for_each_present_cpu(cpu) {
  1330. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1331. int cpustate = st->state;
  1332. if (cpustate < state)
  1333. continue;
  1334. ret = cpuhp_issue_call(cpu, state, true, node);
  1335. if (ret) {
  1336. if (sp->teardown.multi)
  1337. cpuhp_rollback_install(cpu, state, node);
  1338. goto unlock;
  1339. }
  1340. }
  1341. add_node:
  1342. ret = 0;
  1343. hlist_add_head(node, &sp->list);
  1344. unlock:
  1345. mutex_unlock(&cpuhp_state_mutex);
  1346. return ret;
  1347. }
  1348. int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
  1349. bool invoke)
  1350. {
  1351. int ret;
  1352. cpus_read_lock();
  1353. ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
  1354. cpus_read_unlock();
  1355. return ret;
  1356. }
  1357. EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
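/*
 * Multi-instance usage sketch (the foo_* names are made up): a driver whose
 * state was registered with multi_instance == true embeds a hlist_node per
 * object and adds an instance when the object appears.
 *
 *	struct foo_device {
 *		struct hlist_node node;
 *		...
 *	};
 *
 *	ret = __cpuhp_state_add_instance(foo_hp_state, &foo->node, true);
 *
 * With invoke == true the startup.multi callback runs for &foo->node on every
 * present CPU that has already reached the state, as implemented above.
 */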
  1358. /**
  1359. * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
  1360. * @state: The state to setup
  1361. * @invoke: If true, the startup function is invoked for cpus where
  1362. * cpu state >= @state
  1363. * @startup: startup callback function
  1364. * @teardown: teardown callback function
  1365. * @multi_instance: State is set up for multiple instances which get
  1366. * added afterwards.
  1367. *
  1368. * The caller needs to hold cpus read locked while calling this function.
  1369. * Returns:
  1370. * On success:
  1371. * Positive state number if @state is CPUHP_AP_ONLINE_DYN
  1372. * 0 for all other states
  1373. * On failure: proper (negative) error code
  1374. */
  1375. int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
  1376. const char *name, bool invoke,
  1377. int (*startup)(unsigned int cpu),
  1378. int (*teardown)(unsigned int cpu),
  1379. bool multi_instance)
  1380. {
  1381. int cpu, ret = 0;
  1382. bool dynstate;
  1383. lockdep_assert_cpus_held();
  1384. if (cpuhp_cb_check(state) || !name)
  1385. return -EINVAL;
  1386. mutex_lock(&cpuhp_state_mutex);
  1387. ret = cpuhp_store_callbacks(state, name, startup, teardown,
  1388. multi_instance);
  1389. dynstate = state == CPUHP_AP_ONLINE_DYN;
  1390. if (ret > 0 && dynstate) {
  1391. state = ret;
  1392. ret = 0;
  1393. }
  1394. if (ret || !invoke || !startup)
  1395. goto out;
  1396. /*
  1397. * Try to call the startup callback for each present cpu
  1398. * depending on the hotplug state of the cpu.
  1399. */
  1400. for_each_present_cpu(cpu) {
  1401. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1402. int cpustate = st->state;
  1403. if (cpustate < state)
  1404. continue;
  1405. ret = cpuhp_issue_call(cpu, state, true, NULL);
  1406. if (ret) {
  1407. if (teardown)
  1408. cpuhp_rollback_install(cpu, state, NULL);
  1409. cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
  1410. goto out;
  1411. }
  1412. }
  1413. out:
  1414. mutex_unlock(&cpuhp_state_mutex);
  1415. /*
  1416. * If the requested state is CPUHP_AP_ONLINE_DYN, return the
  1417. * dynamically allocated state in case of success.
  1418. */
  1419. if (!ret && dynstate)
  1420. return state;
  1421. return ret;
  1422. }
  1423. EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
  1424. int __cpuhp_setup_state(enum cpuhp_state state,
  1425. const char *name, bool invoke,
  1426. int (*startup)(unsigned int cpu),
  1427. int (*teardown)(unsigned int cpu),
  1428. bool multi_instance)
  1429. {
  1430. int ret;
  1431. cpus_read_lock();
  1432. ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
  1433. teardown, multi_instance);
  1434. cpus_read_unlock();
  1435. return ret;
  1436. }
  1437. EXPORT_SYMBOL(__cpuhp_setup_state);
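/*
 * Setup sketch (illustrative; the foo_* names are made up): a subsystem that
 * wants online/offline callbacks in the dynamic AP range passes
 * CPUHP_AP_ONLINE_DYN and stores the returned state number for later removal.
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_online(unsigned int cpu) { ...; return 0; }
 *	static int foo_offline(unsigned int cpu) { ...; return 0; }
 *
 *	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online", true,
 *				  foo_online, foo_offline, false);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;	// dynamically allocated state number
 *
 * Most callers use the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h>, which fills in invoke and multi_instance.
 */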
  1438. int __cpuhp_state_remove_instance(enum cpuhp_state state,
  1439. struct hlist_node *node, bool invoke)
  1440. {
  1441. struct cpuhp_step *sp = cpuhp_get_step(state);
  1442. int cpu;
  1443. BUG_ON(cpuhp_cb_check(state));
  1444. if (!sp->multi_instance)
  1445. return -EINVAL;
  1446. cpus_read_lock();
  1447. mutex_lock(&cpuhp_state_mutex);
  1448. if (!invoke || !cpuhp_get_teardown_cb(state))
  1449. goto remove;
  1450. /*
  1451. * Call the teardown callback for each present cpu depending
  1452. * on the hotplug state of the cpu. This function is not
  1453. * allowed to fail currently!
  1454. */
  1455. for_each_present_cpu(cpu) {
  1456. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1457. int cpustate = st->state;
  1458. if (cpustate >= state)
  1459. cpuhp_issue_call(cpu, state, false, node);
  1460. }
  1461. remove:
  1462. hlist_del(node);
  1463. mutex_unlock(&cpuhp_state_mutex);
  1464. cpus_read_unlock();
  1465. return 0;
  1466. }
  1467. EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
  1468. /**
  1469. * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
  1470. * @state: The state to remove
  1471. * @invoke: If true, the teardown function is invoked for cpus where
  1472. * cpu state >= @state
  1473. *
  1474. * The caller needs to hold cpus read locked while calling this function.
  1475. * The teardown callback is currently not allowed to fail. Think
  1476. * about module removal!
  1477. */
  1478. void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
  1479. {
  1480. struct cpuhp_step *sp = cpuhp_get_step(state);
  1481. int cpu;
  1482. BUG_ON(cpuhp_cb_check(state));
  1483. lockdep_assert_cpus_held();
  1484. mutex_lock(&cpuhp_state_mutex);
  1485. if (sp->multi_instance) {
  1486. WARN(!hlist_empty(&sp->list),
  1487. "Error: Removing state %d which has instances left.\n",
  1488. state);
  1489. goto remove;
  1490. }
  1491. if (!invoke || !cpuhp_get_teardown_cb(state))
  1492. goto remove;
  1493. /*
  1494. * Call the teardown callback for each present cpu depending
  1495. * on the hotplug state of the cpu. This function is not
  1496. * allowed to fail currently!
  1497. */
  1498. for_each_present_cpu(cpu) {
  1499. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1500. int cpustate = st->state;
  1501. if (cpustate >= state)
  1502. cpuhp_issue_call(cpu, state, false, NULL);
  1503. }
  1504. remove:
  1505. cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
  1506. mutex_unlock(&cpuhp_state_mutex);
  1507. }
  1508. EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
  1509. void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
  1510. {
  1511. cpus_read_lock();
  1512. __cpuhp_remove_state_cpuslocked(state, invoke);
  1513. cpus_read_unlock();
  1514. }
  1515. EXPORT_SYMBOL(__cpuhp_remove_state);
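
/*
 * Illustrative teardown, not part of this file: the counterpart to the setup
 * sketch further up.  cpuhp_remove_state() from <linux/cpuhotplug.h> takes
 * the cpus read lock and ends up in __cpuhp_remove_state() above with
 * invoke == true; example_state is the hypothetical value saved from
 * cpuhp_setup_state() in the earlier sketch.
 *
 *	static void __exit example_exit(void)
 *	{
 *		cpuhp_remove_state(example_state);
 *	}
 */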

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
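
/*
 * For reference (illustrative note, not part of the code): the attribute
 * groups registered above surface roughly as
 *
 *	/sys/devices/system/cpu/hotplug/states		- list of named states
 *	/sys/devices/system/cpu/cpuN/hotplug/state	- current state of cpuN
 *	/sys/devices/system/cpu/cpuN/hotplug/target	- write to change state
 *	/sys/devices/system/cpu/cpuN/hotplug/fail	- fault injection point
 *
 * Writing the numeric value of CPUHP_OFFLINE to "target" takes the CPU down
 * and writing the value of CPUHP_ONLINE brings it back up; intermediate
 * targets are only accepted with CONFIG_CPU_HOTPLUG_STATE_CONTROL, as
 * checked in write_cpuhp_target() above.
 */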

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
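
/*
 * How the lookup works (the real implementation lives in get_cpu_mask() in
 * <linux/cpumask.h>; reproduced here only as an illustration): pick the row
 * whose single bit sits at the right position inside a long, then step the
 * pointer back so that bit lands in the right word of the resulting mask.
 * The empty row 0 is what makes stepping the pointer backwards safe.
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */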

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
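
/*
 * Illustrative caller, not part of this file: architecture setup code seeds
 * these masks early during boot, before SMP bringup consumes them, e.g.
 * (purely hypothetical values):
 *
 *	init_cpu_possible(cpumask_of(0));
 *	init_cpu_present(cpumask_of(0));
 */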

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}