cpu.c

  1. /* CPU control.
  2. * (C) 2001, 2002, 2003, 2004 Rusty Russell
  3. *
  4. * This code is licensed under the GPL.
  5. */
  6. #include <linux/proc_fs.h>
  7. #include <linux/smp.h>
  8. #include <linux/init.h>
  9. #include <linux/notifier.h>
  10. #include <linux/sched.h>
  11. #include <linux/unistd.h>
  12. #include <linux/cpu.h>
  13. #include <linux/oom.h>
  14. #include <linux/rcupdate.h>
  15. #include <linux/export.h>
  16. #include <linux/bug.h>
  17. #include <linux/kthread.h>
  18. #include <linux/stop_machine.h>
  19. #include <linux/mutex.h>
  20. #include <linux/gfp.h>
  21. #include <linux/suspend.h>
  22. #include <linux/lockdep.h>
  23. #include <linux/tick.h>
  24. #include <linux/irq.h>
  25. #include <linux/smpboot.h>
  26. #include <trace/events/power.h>
  27. #define CREATE_TRACE_POINTS
  28. #include <trace/events/cpuhp.h>
  29. #include "smpboot.h"
  30. /**
  31. * cpuhp_cpu_state - Per cpu hotplug state storage
  32. * @state: The current cpu state
  33. * @target: The target state
  34. * @thread: Pointer to the hotplug thread
  35. * @should_run: Thread should execute
  36. * @rollback: Perform a rollback
  37. * @single: Single callback invocation
  38. * @bringup: Single callback bringup or teardown selector
  39. * @cb_state: The state for a single callback (install/uninstall)
  40. * @result: Result of the operation
  41. * @done: Signal completion to the issuer of the task
  42. */
  43. struct cpuhp_cpu_state {
  44. enum cpuhp_state state;
  45. enum cpuhp_state target;
  46. #ifdef CONFIG_SMP
  47. struct task_struct *thread;
  48. bool should_run;
  49. bool rollback;
  50. bool single;
  51. bool bringup;
  52. struct hlist_node *node;
  53. enum cpuhp_state cb_state;
  54. int result;
  55. struct completion done;
  56. #endif
  57. };
  58. static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
  59. /**
  60. * cpuhp_step - Hotplug state machine step
  61. * @name: Name of the step
  62. * @startup: Startup function of the step
  63. * @teardown: Teardown function of the step
  64. * @skip_onerr: Do not invoke the functions on error rollback
  65. * Will go away once the notifiers are gone
  66. * @cant_stop: Bringup/teardown can't be stopped at this step
  67. */
  68. struct cpuhp_step {
  69. const char *name;
  70. union {
  71. int (*single)(unsigned int cpu);
  72. int (*multi)(unsigned int cpu,
  73. struct hlist_node *node);
  74. } startup;
  75. union {
  76. int (*single)(unsigned int cpu);
  77. int (*multi)(unsigned int cpu,
  78. struct hlist_node *node);
  79. } teardown;
  80. struct hlist_head list;
  81. bool skip_onerr;
  82. bool cant_stop;
  83. bool multi_instance;
  84. };
  85. static DEFINE_MUTEX(cpuhp_state_mutex);
  86. static struct cpuhp_step cpuhp_bp_states[];
  87. static struct cpuhp_step cpuhp_ap_states[];
  88. static bool cpuhp_is_ap_state(enum cpuhp_state state)
  89. {
  90. /*
  91. * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
  92. * purposes as that state is handled explicitly in cpu_down.
  93. */
  94. return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
  95. }
  96. static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
  97. {
  98. struct cpuhp_step *sp;
  99. sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
  100. return sp + state;
  101. }
  102. /**
  103. * cpuhp_invoke_callback - Invoke the callbacks for a given state
  104. * @cpu: The cpu for which the callback should be invoked
  105. * @state: The step in the state machine to invoke
  106. * @bringup: True if the bringup callback should be invoked
  107. *
  108. * Called from cpu hotplug and from the state register machinery.
  109. */
  110. static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
  111. bool bringup, struct hlist_node *node)
  112. {
  113. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  114. struct cpuhp_step *step = cpuhp_get_step(state);
  115. int (*cbm)(unsigned int cpu, struct hlist_node *node);
  116. int (*cb)(unsigned int cpu);
  117. int ret, cnt;
  118. if (!step->multi_instance) {
  119. cb = bringup ? step->startup.single : step->teardown.single;
  120. if (!cb)
  121. return 0;
  122. trace_cpuhp_enter(cpu, st->target, state, cb);
  123. ret = cb(cpu);
  124. trace_cpuhp_exit(cpu, st->state, state, ret);
  125. return ret;
  126. }
  127. cbm = bringup ? step->startup.multi : step->teardown.multi;
  128. if (!cbm)
  129. return 0;
  130. /* Single invocation for instance add/remove */
  131. if (node) {
  132. trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  133. ret = cbm(cpu, node);
  134. trace_cpuhp_exit(cpu, st->state, state, ret);
  135. return ret;
  136. }
  137. /* State transition. Invoke on all instances */
  138. cnt = 0;
  139. hlist_for_each(node, &step->list) {
  140. trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  141. ret = cbm(cpu, node);
  142. trace_cpuhp_exit(cpu, st->state, state, ret);
  143. if (ret)
  144. goto err;
  145. cnt++;
  146. }
  147. return 0;
  148. err:
  149. /* Rollback the instances if one failed */
  150. cbm = !bringup ? step->startup.multi : step->teardown.multi;
  151. if (!cbm)
  152. return ret;
  153. hlist_for_each(node, &step->list) {
  154. if (!cnt--)
  155. break;
  156. cbm(cpu, node);
  157. }
  158. return ret;
  159. }
  160. #ifdef CONFIG_SMP
  161. /* Serializes the updates to cpu_online_mask, cpu_present_mask */
  162. static DEFINE_MUTEX(cpu_add_remove_lock);
  163. bool cpuhp_tasks_frozen;
  164. EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
  165. /*
  166. * The following two APIs (cpu_maps_update_begin/done) must be used when
  167. * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
  168. * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
  169. * hotplug callback (un)registration performed using __register_cpu_notifier()
  170. * or __unregister_cpu_notifier().
  171. */
  172. void cpu_maps_update_begin(void)
  173. {
  174. mutex_lock(&cpu_add_remove_lock);
  175. }
  176. EXPORT_SYMBOL(cpu_notifier_register_begin);
  177. void cpu_maps_update_done(void)
  178. {
  179. mutex_unlock(&cpu_add_remove_lock);
  180. }
  181. EXPORT_SYMBOL(cpu_notifier_register_done);
  182. static RAW_NOTIFIER_HEAD(cpu_chain);
  183. /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  184. * Should always be manipulated under cpu_add_remove_lock
  185. */
  186. static int cpu_hotplug_disabled;
  187. #ifdef CONFIG_HOTPLUG_CPU
  188. static struct {
  189. struct task_struct *active_writer;
  190. /* wait queue to wake up the active_writer */
  191. wait_queue_head_t wq;
  192. /* verifies that no writer will get active while readers are active */
  193. struct mutex lock;
  194. /*
  195. * Also blocks the new readers during
  196. * an ongoing cpu hotplug operation.
  197. */
  198. atomic_t refcount;
  199. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  200. struct lockdep_map dep_map;
  201. #endif
  202. } cpu_hotplug = {
  203. .active_writer = NULL,
  204. .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
  205. .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
  206. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  207. .dep_map = {.name = "cpu_hotplug.lock" },
  208. #endif
  209. };
  210. /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  211. #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  212. #define cpuhp_lock_acquire_tryread() \
  213. lock_map_acquire_tryread(&cpu_hotplug.dep_map)
  214. #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
  215. #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
  216. void get_online_cpus(void)
  217. {
  218. might_sleep();
  219. if (cpu_hotplug.active_writer == current)
  220. return;
  221. cpuhp_lock_acquire_read();
  222. mutex_lock(&cpu_hotplug.lock);
  223. atomic_inc(&cpu_hotplug.refcount);
  224. mutex_unlock(&cpu_hotplug.lock);
  225. }
  226. EXPORT_SYMBOL_GPL(get_online_cpus);
  227. void put_online_cpus(void)
  228. {
  229. int refcount;
  230. if (cpu_hotplug.active_writer == current)
  231. return;
  232. refcount = atomic_dec_return(&cpu_hotplug.refcount);
  233. if (WARN_ON(refcount < 0)) /* try to fix things up */
  234. atomic_inc(&cpu_hotplug.refcount);
  235. if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
  236. wake_up(&cpu_hotplug.wq);
  237. cpuhp_lock_release();
  238. }
  239. EXPORT_SYMBOL_GPL(put_online_cpus);
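/*
 * Illustrative sketch, not part of this file: a typical reader-side use of
 * get_online_cpus()/put_online_cpus() to keep cpu_online_mask stable while
 * iterating over it. The function name is hypothetical.
 *
 *	static unsigned int example_count_online_cpus(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		put_online_cpus();
 *
 *		return cnt;
 *	}
 */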
  240. /*
  241. * This ensures that the hotplug operation can begin only when the
  242. * refcount goes to zero.
  243. *
  244. * Note that during a cpu-hotplug operation, the new readers, if any,
  245. * will be blocked by the cpu_hotplug.lock
  246. *
  247. * Since cpu_hotplug_begin() is always called after invoking
  248. * cpu_maps_update_begin(), we can be sure that only one writer is active.
  249. *
  250. * Note that theoretically, there is a possibility of a livelock:
  251. * - Refcount goes to zero, last reader wakes up the sleeping
  252. * writer.
  253. * - Last reader unlocks the cpu_hotplug.lock.
  254. * - A new reader arrives at this moment, bumps up the refcount.
  255. * - The writer acquires the cpu_hotplug.lock finds the refcount
  256. * non zero and goes to sleep again.
  257. *
  258. * However, this is very difficult to achieve in practice since
  259. * get_online_cpus() is not an API which is called all that often.
  260. *
  261. */
  262. void cpu_hotplug_begin(void)
  263. {
  264. DEFINE_WAIT(wait);
  265. cpu_hotplug.active_writer = current;
  266. cpuhp_lock_acquire();
  267. for (;;) {
  268. mutex_lock(&cpu_hotplug.lock);
  269. prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
  270. if (likely(!atomic_read(&cpu_hotplug.refcount)))
  271. break;
  272. mutex_unlock(&cpu_hotplug.lock);
  273. schedule();
  274. }
  275. finish_wait(&cpu_hotplug.wq, &wait);
  276. }
  277. void cpu_hotplug_done(void)
  278. {
  279. cpu_hotplug.active_writer = NULL;
  280. mutex_unlock(&cpu_hotplug.lock);
  281. cpuhp_lock_release();
  282. }
  283. /*
  284. * Wait for currently running CPU hotplug operations to complete (if any) and
  285. * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
  286. * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
  287. * hotplug path before performing hotplug operations. So acquiring that lock
  288. * guarantees mutual exclusion from any currently running hotplug operations.
  289. */
  290. void cpu_hotplug_disable(void)
  291. {
  292. cpu_maps_update_begin();
  293. cpu_hotplug_disabled++;
  294. cpu_maps_update_done();
  295. }
  296. EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
  297. static void __cpu_hotplug_enable(void)
  298. {
  299. if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
  300. return;
  301. cpu_hotplug_disabled--;
  302. }
  303. void cpu_hotplug_enable(void)
  304. {
  305. cpu_maps_update_begin();
  306. __cpu_hotplug_enable();
  307. cpu_maps_update_done();
  308. }
  309. EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
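/*
 * Illustrative sketch, not part of this file: cpu_hotplug_disable() and
 * cpu_hotplug_enable() must be called in balanced pairs; a caller brackets
 * work that cannot tolerate a concurrent hotplug operation. The function
 * name is hypothetical.
 *
 *	static void example_no_hotplug_section(void)
 *	{
 *		cpu_hotplug_disable();
 *		pr_info("%u CPUs online, cpu_up/cpu_down now return -EBUSY\n",
 *			num_online_cpus());
 *		cpu_hotplug_enable();
 *	}
 */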
  310. #endif /* CONFIG_HOTPLUG_CPU */
  311. /* Need to know about CPUs going up/down? */
  312. int register_cpu_notifier(struct notifier_block *nb)
  313. {
  314. int ret;
  315. cpu_maps_update_begin();
  316. ret = raw_notifier_chain_register(&cpu_chain, nb);
  317. cpu_maps_update_done();
  318. return ret;
  319. }
  320. int __register_cpu_notifier(struct notifier_block *nb)
  321. {
  322. return raw_notifier_chain_register(&cpu_chain, nb);
  323. }
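/*
 * Illustrative sketch, not part of this file: the legacy notifier usage these
 * helpers serve. The callback and notifier_block names are hypothetical; new
 * code should use the cpuhp state machine instead.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu %u came online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu %u went down\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_nb = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&example_cpu_nb);
 */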
  324. static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
  325. int *nr_calls)
  326. {
  327. unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
  328. void *hcpu = (void *)(long)cpu;
  329. int ret;
  330. ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
  331. nr_calls);
  332. return notifier_to_errno(ret);
  333. }
  334. static int cpu_notify(unsigned long val, unsigned int cpu)
  335. {
  336. return __cpu_notify(val, cpu, -1, NULL);
  337. }
  338. static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
  339. {
  340. BUG_ON(cpu_notify(val, cpu));
  341. }
  342. /* Notifier wrappers for transitioning to state machine */
  343. static int notify_prepare(unsigned int cpu)
  344. {
  345. int nr_calls = 0;
  346. int ret;
  347. ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
  348. if (ret) {
  349. nr_calls--;
  350. printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
  351. __func__, cpu);
  352. __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
  353. }
  354. return ret;
  355. }
  356. static int notify_online(unsigned int cpu)
  357. {
  358. cpu_notify(CPU_ONLINE, cpu);
  359. return 0;
  360. }
  361. static int bringup_wait_for_ap(unsigned int cpu)
  362. {
  363. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  364. wait_for_completion(&st->done);
  365. return st->result;
  366. }
  367. static int bringup_cpu(unsigned int cpu)
  368. {
  369. struct task_struct *idle = idle_thread_get(cpu);
  370. int ret;
  371. /*
  372. * Some architectures have to walk the irq descriptors to
  373. * setup the vector space for the cpu which comes online.
  374. * Prevent irq alloc/free across the bringup.
  375. */
  376. irq_lock_sparse();
  377. /* Arch-specific enabling code. */
  378. ret = __cpu_up(cpu, idle);
  379. irq_unlock_sparse();
  380. if (ret) {
  381. cpu_notify(CPU_UP_CANCELED, cpu);
  382. return ret;
  383. }
  384. ret = bringup_wait_for_ap(cpu);
  385. BUG_ON(!cpu_online(cpu));
  386. return ret;
  387. }
  388. /*
  389. * Hotplug state machine related functions
  390. */
  391. static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
  392. {
  393. for (st->state++; st->state < st->target; st->state++) {
  394. struct cpuhp_step *step = cpuhp_get_step(st->state);
  395. if (!step->skip_onerr)
  396. cpuhp_invoke_callback(cpu, st->state, true, NULL);
  397. }
  398. }
  399. static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
  400. enum cpuhp_state target)
  401. {
  402. enum cpuhp_state prev_state = st->state;
  403. int ret = 0;
  404. for (; st->state > target; st->state--) {
  405. ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
  406. if (ret) {
  407. st->target = prev_state;
  408. undo_cpu_down(cpu, st);
  409. break;
  410. }
  411. }
  412. return ret;
  413. }
  414. static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
  415. {
  416. for (st->state--; st->state > st->target; st->state--) {
  417. struct cpuhp_step *step = cpuhp_get_step(st->state);
  418. if (!step->skip_onerr)
  419. cpuhp_invoke_callback(cpu, st->state, false, NULL);
  420. }
  421. }
  422. static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
  423. enum cpuhp_state target)
  424. {
  425. enum cpuhp_state prev_state = st->state;
  426. int ret = 0;
  427. while (st->state < target) {
  428. st->state++;
  429. ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
  430. if (ret) {
  431. st->target = prev_state;
  432. undo_cpu_up(cpu, st);
  433. break;
  434. }
  435. }
  436. return ret;
  437. }
  438. /*
  439. * The cpu hotplug threads manage the bringup and teardown of the cpus
  440. */
  441. static void cpuhp_create(unsigned int cpu)
  442. {
  443. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  444. init_completion(&st->done);
  445. }
  446. static int cpuhp_should_run(unsigned int cpu)
  447. {
  448. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  449. return st->should_run;
  450. }
  451. /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
  452. static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
  453. {
  454. enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
  455. return cpuhp_down_callbacks(cpu, st, target);
  456. }
  457. /* Execute the online startup callbacks. Used to be CPU_ONLINE */
  458. static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
  459. {
  460. return cpuhp_up_callbacks(cpu, st, st->target);
  461. }
  462. /*
  463. * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
  464. * callbacks when a state gets [un]installed at runtime.
  465. */
  466. static void cpuhp_thread_fun(unsigned int cpu)
  467. {
  468. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  469. int ret = 0;
  470. /*
  471. * Paired with the mb() in cpuhp_kick_ap_work and
  472. * cpuhp_invoke_ap_callback, so the work set is consistently visible.
  473. */
  474. smp_mb();
  475. if (!st->should_run)
  476. return;
  477. st->should_run = false;
  478. /* Single callback invocation for [un]install ? */
  479. if (st->single) {
  480. if (st->cb_state < CPUHP_AP_ONLINE) {
  481. local_irq_disable();
  482. ret = cpuhp_invoke_callback(cpu, st->cb_state,
  483. st->bringup, st->node);
  484. local_irq_enable();
  485. } else {
  486. ret = cpuhp_invoke_callback(cpu, st->cb_state,
  487. st->bringup, st->node);
  488. }
  489. } else if (st->rollback) {
  490. BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
  491. undo_cpu_down(cpu, st);
  492. /*
  493. * This is a momentary workaround to keep the notifier users
  494. * happy. Will go away once we got rid of the notifiers.
  495. */
  496. cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
  497. st->rollback = false;
  498. } else {
  499. /* Cannot happen .... */
  500. BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
  501. /* Regular hotplug work */
  502. if (st->state < st->target)
  503. ret = cpuhp_ap_online(cpu, st);
  504. else if (st->state > st->target)
  505. ret = cpuhp_ap_offline(cpu, st);
  506. }
  507. st->result = ret;
  508. complete(&st->done);
  509. }
  510. /* Invoke a single callback on a remote cpu */
  511. static int
  512. cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
  513. struct hlist_node *node)
  514. {
  515. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  516. if (!cpu_online(cpu))
  517. return 0;
  518. /*
  519. * If we are up and running, use the hotplug thread. For early calls
  520. * we invoke the thread function directly.
  521. */
  522. if (!st->thread)
  523. return cpuhp_invoke_callback(cpu, state, bringup, node);
  524. st->cb_state = state;
  525. st->single = true;
  526. st->bringup = bringup;
  527. st->node = node;
  528. /*
  529. * Make sure the above stores are visible before should_run becomes
  530. * true. Paired with the mb() above in cpuhp_thread_fun()
  531. */
  532. smp_mb();
  533. st->should_run = true;
  534. wake_up_process(st->thread);
  535. wait_for_completion(&st->done);
  536. return st->result;
  537. }
  538. /* Regular hotplug invocation of the AP hotplug thread */
  539. static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
  540. {
  541. st->result = 0;
  542. st->single = false;
  543. /*
  544. * Make sure the above stores are visible before should_run becomes
  545. * true. Paired with the mb() above in cpuhp_thread_fun()
  546. */
  547. smp_mb();
  548. st->should_run = true;
  549. wake_up_process(st->thread);
  550. }
  551. static int cpuhp_kick_ap_work(unsigned int cpu)
  552. {
  553. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  554. enum cpuhp_state state = st->state;
  555. trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
  556. __cpuhp_kick_ap_work(st);
  557. wait_for_completion(&st->done);
  558. trace_cpuhp_exit(cpu, st->state, state, st->result);
  559. return st->result;
  560. }
  561. static struct smp_hotplug_thread cpuhp_threads = {
  562. .store = &cpuhp_state.thread,
  563. .create = &cpuhp_create,
  564. .thread_should_run = cpuhp_should_run,
  565. .thread_fn = cpuhp_thread_fun,
  566. .thread_comm = "cpuhp/%u",
  567. .selfparking = true,
  568. };
  569. void __init cpuhp_threads_init(void)
  570. {
  571. BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
  572. kthread_unpark(this_cpu_read(cpuhp_state.thread));
  573. }
  574. #ifdef CONFIG_HOTPLUG_CPU
  575. EXPORT_SYMBOL(register_cpu_notifier);
  576. EXPORT_SYMBOL(__register_cpu_notifier);
  577. void unregister_cpu_notifier(struct notifier_block *nb)
  578. {
  579. cpu_maps_update_begin();
  580. raw_notifier_chain_unregister(&cpu_chain, nb);
  581. cpu_maps_update_done();
  582. }
  583. EXPORT_SYMBOL(unregister_cpu_notifier);
  584. void __unregister_cpu_notifier(struct notifier_block *nb)
  585. {
  586. raw_notifier_chain_unregister(&cpu_chain, nb);
  587. }
  588. EXPORT_SYMBOL(__unregister_cpu_notifier);
  589. /**
  590. * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  591. * @cpu: a CPU id
  592. *
  593. * This function walks all processes, finds a valid mm struct for each one and
  594. * then clears a corresponding bit in mm's cpumask. While this all sounds
  595. * trivial, there are various non-obvious corner cases, which this function
  596. * tries to solve in a safe manner.
  597. *
  598. * Also note that the function uses a somewhat relaxed locking scheme, so it may
  599. * be called only for an already offlined CPU.
  600. */
  601. void clear_tasks_mm_cpumask(int cpu)
  602. {
  603. struct task_struct *p;
  604. /*
  605. * This function is called after the cpu is taken down and marked
  606. * offline, so it's not like new tasks will ever get this cpu set in
  607. * their mm mask. -- Peter Zijlstra
  608. * Thus, we may use rcu_read_lock() here, instead of grabbing
  609. * full-fledged tasklist_lock.
  610. */
  611. WARN_ON(cpu_online(cpu));
  612. rcu_read_lock();
  613. for_each_process(p) {
  614. struct task_struct *t;
  615. /*
  616. * Main thread might exit, but other threads may still have
  617. * a valid mm. Find one.
  618. */
  619. t = find_lock_task_mm(p);
  620. if (!t)
  621. continue;
  622. cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
  623. task_unlock(t);
  624. }
  625. rcu_read_unlock();
  626. }
  627. static inline void check_for_tasks(int dead_cpu)
  628. {
  629. struct task_struct *g, *p;
  630. read_lock(&tasklist_lock);
  631. for_each_process_thread(g, p) {
  632. if (!p->on_rq)
  633. continue;
  634. /*
  635. * We do the check with unlocked task_rq(p)->lock.
  636. * Order the reads so that we do not warn about a task
  637. * which was running on this cpu in the past and has
  638. * just been woken on another cpu.
  639. */
  640. rmb();
  641. if (task_cpu(p) != dead_cpu)
  642. continue;
  643. pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
  644. p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
  645. }
  646. read_unlock(&tasklist_lock);
  647. }
  648. static int notify_down_prepare(unsigned int cpu)
  649. {
  650. int err, nr_calls = 0;
  651. err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
  652. if (err) {
  653. nr_calls--;
  654. __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
  655. pr_warn("%s: attempt to take down CPU %u failed\n",
  656. __func__, cpu);
  657. }
  658. return err;
  659. }
  660. /* Take this CPU down. */
  661. static int take_cpu_down(void *_param)
  662. {
  663. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  664. enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
  665. int err, cpu = smp_processor_id();
  666. /* Ensure this CPU doesn't handle any more interrupts. */
  667. err = __cpu_disable();
  668. if (err < 0)
  669. return err;
  670. /*
  671. * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
  672. * do this step again.
  673. */
  674. WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
  675. st->state--;
  676. /* Invoke the former CPU_DYING callbacks */
  677. for (; st->state > target; st->state--)
  678. cpuhp_invoke_callback(cpu, st->state, false, NULL);
  679. /* Give up timekeeping duties */
  680. tick_handover_do_timer();
  681. /* Park the stopper thread */
  682. stop_machine_park(cpu);
  683. return 0;
  684. }
  685. static int takedown_cpu(unsigned int cpu)
  686. {
  687. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  688. int err;
  689. /* Park the smpboot threads */
  690. kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  691. smpboot_park_threads(cpu);
  692. /*
  693. * Prevent irq alloc/free while the dying cpu reorganizes the
  694. * interrupt affinities.
  695. */
  696. irq_lock_sparse();
  697. /*
  698. * So now all preempt/rcu users must observe !cpu_active().
  699. */
  700. err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
  701. if (err) {
  702. /* CPU refused to die */
  703. irq_unlock_sparse();
  704. /* Unpark the hotplug thread so we can rollback there */
  705. kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  706. return err;
  707. }
  708. BUG_ON(cpu_online(cpu));
  709. /*
  710. * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
  711. * runnable tasks from the cpu, there's only the idle task left now
  712. * that the migration thread is done doing the stop_machine thing.
  713. *
  714. * Wait for the stop thread to go away.
  715. */
  716. wait_for_completion(&st->done);
  717. BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
  718. /* Interrupts are moved away from the dying cpu, reenable alloc/free */
  719. irq_unlock_sparse();
  720. hotplug_cpu__broadcast_tick_pull(cpu);
  721. /* This actually kills the CPU. */
  722. __cpu_die(cpu);
  723. tick_cleanup_dead_cpu(cpu);
  724. return 0;
  725. }
  726. static int notify_dead(unsigned int cpu)
  727. {
  728. cpu_notify_nofail(CPU_DEAD, cpu);
  729. check_for_tasks(cpu);
  730. return 0;
  731. }
  732. static void cpuhp_complete_idle_dead(void *arg)
  733. {
  734. struct cpuhp_cpu_state *st = arg;
  735. complete(&st->done);
  736. }
  737. void cpuhp_report_idle_dead(void)
  738. {
  739. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  740. BUG_ON(st->state != CPUHP_AP_OFFLINE);
  741. rcu_report_dead(smp_processor_id());
  742. st->state = CPUHP_AP_IDLE_DEAD;
  743. /*
  744. * We cannot call complete after rcu_report_dead() so we delegate it
  745. * to an online cpu.
  746. */
  747. smp_call_function_single(cpumask_first(cpu_online_mask),
  748. cpuhp_complete_idle_dead, st, 0);
  749. }
  750. #else
  751. #define notify_down_prepare NULL
  752. #define takedown_cpu NULL
  753. #define notify_dead NULL
  754. #endif
  755. #ifdef CONFIG_HOTPLUG_CPU
  756. /* Requires cpu_add_remove_lock to be held */
  757. static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
  758. enum cpuhp_state target)
  759. {
  760. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  761. int prev_state, ret = 0;
  762. bool hasdied = false;
  763. if (num_online_cpus() == 1)
  764. return -EBUSY;
  765. if (!cpu_present(cpu))
  766. return -EINVAL;
  767. cpu_hotplug_begin();
  768. cpuhp_tasks_frozen = tasks_frozen;
  769. prev_state = st->state;
  770. st->target = target;
  771. /*
  772. * If the current CPU state is in the range of the AP hotplug thread,
  773. * then we need to kick the thread.
  774. */
  775. if (st->state > CPUHP_TEARDOWN_CPU) {
  776. ret = cpuhp_kick_ap_work(cpu);
  777. /*
  778. * The AP side has done the error rollback already. Just
  779. * return the error code..
  780. */
  781. if (ret)
  782. goto out;
  783. /*
  784. * We might have stopped still in the range of the AP hotplug
  785. * thread. Nothing to do anymore.
  786. */
  787. if (st->state > CPUHP_TEARDOWN_CPU)
  788. goto out;
  789. }
  790. /*
  791. * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
  792. * to do the further cleanups.
  793. */
  794. ret = cpuhp_down_callbacks(cpu, st, target);
  795. if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
  796. st->target = prev_state;
  797. st->rollback = true;
  798. cpuhp_kick_ap_work(cpu);
  799. }
  800. hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
  801. out:
  802. cpu_hotplug_done();
  803. /* This post dead nonsense must die */
  804. if (!ret && hasdied)
  805. cpu_notify_nofail(CPU_POST_DEAD, cpu);
  806. return ret;
  807. }
  808. static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
  809. {
  810. int err;
  811. cpu_maps_update_begin();
  812. if (cpu_hotplug_disabled) {
  813. err = -EBUSY;
  814. goto out;
  815. }
  816. err = _cpu_down(cpu, 0, target);
  817. out:
  818. cpu_maps_update_done();
  819. return err;
  820. }
  821. int cpu_down(unsigned int cpu)
  822. {
  823. return do_cpu_down(cpu, CPUHP_OFFLINE);
  824. }
  825. EXPORT_SYMBOL(cpu_down);
  826. #endif /*CONFIG_HOTPLUG_CPU*/
  827. /**
  828. * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  829. * @cpu: cpu that just started
  830. *
  831. * It must be called by the arch code on the new cpu, before the new cpu
  832. * enables interrupts and before the "boot" cpu returns from __cpu_up().
  833. */
  834. void notify_cpu_starting(unsigned int cpu)
  835. {
  836. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  837. enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
  838. while (st->state < target) {
  839. st->state++;
  840. cpuhp_invoke_callback(cpu, st->state, true, NULL);
  841. }
  842. }
  843. /*
  844. * Called from the idle task. We need to set active here, so we can kick off
  845. * the stopper thread and unpark the smpboot threads. If the target state is
  846. * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
  847. * cpu further.
  848. */
  849. void cpuhp_online_idle(enum cpuhp_state state)
  850. {
  851. struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  852. unsigned int cpu = smp_processor_id();
  853. /* Happens for the boot cpu */
  854. if (state != CPUHP_AP_ONLINE_IDLE)
  855. return;
  856. st->state = CPUHP_AP_ONLINE_IDLE;
  857. /* Unpark the stopper thread and the hotplug thread of this cpu */
  858. stop_machine_unpark(cpu);
  859. kthread_unpark(st->thread);
  860. /* Should we go further up ? */
  861. if (st->target > CPUHP_AP_ONLINE_IDLE)
  862. __cpuhp_kick_ap_work(st);
  863. else
  864. complete(&st->done);
  865. }
  866. /* Requires cpu_add_remove_lock to be held */
  867. static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
  868. {
  869. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  870. struct task_struct *idle;
  871. int ret = 0;
  872. cpu_hotplug_begin();
  873. if (!cpu_present(cpu)) {
  874. ret = -EINVAL;
  875. goto out;
  876. }
  877. /*
  878. * The caller of do_cpu_up might have raced with another
  879. * caller. Ignore it for now.
  880. */
  881. if (st->state >= target)
  882. goto out;
  883. if (st->state == CPUHP_OFFLINE) {
  884. /* Let it fail before we try to bring the cpu up */
  885. idle = idle_thread_get(cpu);
  886. if (IS_ERR(idle)) {
  887. ret = PTR_ERR(idle);
  888. goto out;
  889. }
  890. }
  891. cpuhp_tasks_frozen = tasks_frozen;
  892. st->target = target;
  893. /*
  894. * If the current CPU state is in the range of the AP hotplug thread,
  895. * then we need to kick the thread once more.
  896. */
  897. if (st->state > CPUHP_BRINGUP_CPU) {
  898. ret = cpuhp_kick_ap_work(cpu);
  899. /*
  900. * The AP side has done the error rollback already. Just
  901. * return the error code..
  902. */
  903. if (ret)
  904. goto out;
  905. }
  906. /*
  907. * Try to reach the target state. We max out on the BP at
  908. * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
  909. * responsible for bringing it up to the target state.
  910. */
  911. target = min((int)target, CPUHP_BRINGUP_CPU);
  912. ret = cpuhp_up_callbacks(cpu, st, target);
  913. out:
  914. cpu_hotplug_done();
  915. return ret;
  916. }
  917. static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
  918. {
  919. int err = 0;
  920. if (!cpu_possible(cpu)) {
  921. pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
  922. cpu);
  923. #if defined(CONFIG_IA64)
  924. pr_err("please check additional_cpus= boot parameter\n");
  925. #endif
  926. return -EINVAL;
  927. }
  928. err = try_online_node(cpu_to_node(cpu));
  929. if (err)
  930. return err;
  931. cpu_maps_update_begin();
  932. if (cpu_hotplug_disabled) {
  933. err = -EBUSY;
  934. goto out;
  935. }
  936. err = _cpu_up(cpu, 0, target);
  937. out:
  938. cpu_maps_update_done();
  939. return err;
  940. }
  941. int cpu_up(unsigned int cpu)
  942. {
  943. return do_cpu_up(cpu, CPUHP_ONLINE);
  944. }
  945. EXPORT_SYMBOL_GPL(cpu_up);
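/*
 * Illustrative sketch, not part of this file: cycling a present CPU from
 * kernel code. cpu_down() is only available with CONFIG_HOTPLUG_CPU and the
 * function name is hypothetical.
 *
 *	static int example_cycle_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		ret = cpu_down(cpu);
 *		if (ret)
 *			return ret;
 *		return cpu_up(cpu);
 *	}
 */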
  946. #ifdef CONFIG_PM_SLEEP_SMP
  947. static cpumask_var_t frozen_cpus;
  948. int disable_nonboot_cpus(void)
  949. {
  950. int cpu, first_cpu, error = 0;
  951. cpu_maps_update_begin();
  952. first_cpu = cpumask_first(cpu_online_mask);
  953. /*
  954. * We take down all of the non-boot CPUs in one shot to avoid races
  955. * with the userspace trying to use the CPU hotplug at the same time
  956. */
  957. cpumask_clear(frozen_cpus);
  958. pr_info("Disabling non-boot CPUs ...\n");
  959. for_each_online_cpu(cpu) {
  960. if (cpu == first_cpu)
  961. continue;
  962. trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
  963. error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
  964. trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
  965. if (!error)
  966. cpumask_set_cpu(cpu, frozen_cpus);
  967. else {
  968. pr_err("Error taking CPU%d down: %d\n", cpu, error);
  969. break;
  970. }
  971. }
  972. if (!error)
  973. BUG_ON(num_online_cpus() > 1);
  974. else
  975. pr_err("Non-boot CPUs are not disabled\n");
  976. /*
  977. * Make sure the CPUs won't be enabled by someone else. We need to do
  978. * this even in case of failure as all disable_nonboot_cpus() users are
  979. * supposed to do enable_nonboot_cpus() on the failure path.
  980. */
  981. cpu_hotplug_disabled++;
  982. cpu_maps_update_done();
  983. return error;
  984. }
  985. void __weak arch_enable_nonboot_cpus_begin(void)
  986. {
  987. }
  988. void __weak arch_enable_nonboot_cpus_end(void)
  989. {
  990. }
  991. void enable_nonboot_cpus(void)
  992. {
  993. int cpu, error;
  994. /* Allow everyone to use the CPU hotplug again */
  995. cpu_maps_update_begin();
  996. __cpu_hotplug_enable();
  997. if (cpumask_empty(frozen_cpus))
  998. goto out;
  999. pr_info("Enabling non-boot CPUs ...\n");
  1000. arch_enable_nonboot_cpus_begin();
  1001. for_each_cpu(cpu, frozen_cpus) {
  1002. trace_suspend_resume(TPS("CPU_ON"), cpu, true);
  1003. error = _cpu_up(cpu, 1, CPUHP_ONLINE);
  1004. trace_suspend_resume(TPS("CPU_ON"), cpu, false);
  1005. if (!error) {
  1006. pr_info("CPU%d is up\n", cpu);
  1007. continue;
  1008. }
  1009. pr_warn("Error taking CPU%d up: %d\n", cpu, error);
  1010. }
  1011. arch_enable_nonboot_cpus_end();
  1012. cpumask_clear(frozen_cpus);
  1013. out:
  1014. cpu_maps_update_done();
  1015. }
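/*
 * Illustrative sketch, not part of this file: the suspend/hibernate core
 * pairs these helpers around the "non-boot CPUs offline" window, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (error)
 *		goto Enable_cpus;
 *	... enter the low power state on the boot CPU ...
 *   Enable_cpus:
 *	enable_nonboot_cpus();
 */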
  1016. static int __init alloc_frozen_cpus(void)
  1017. {
  1018. if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
  1019. return -ENOMEM;
  1020. return 0;
  1021. }
  1022. core_initcall(alloc_frozen_cpus);
  1023. /*
  1024. * When callbacks for CPU hotplug notifications are being executed, we must
  1025. * ensure that the state of the system with respect to the tasks being frozen
  1026. * or not, as reported by the notification, remains unchanged *throughout the
  1027. * duration* of the execution of the callbacks.
  1028. * Hence we need to prevent the freezer from racing with regular CPU hotplug.
  1029. *
  1030. * This synchronization is implemented by mutually excluding regular CPU
  1031. * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
  1032. * Hibernate notifications.
  1033. */
  1034. static int
  1035. cpu_hotplug_pm_callback(struct notifier_block *nb,
  1036. unsigned long action, void *ptr)
  1037. {
  1038. switch (action) {
  1039. case PM_SUSPEND_PREPARE:
  1040. case PM_HIBERNATION_PREPARE:
  1041. cpu_hotplug_disable();
  1042. break;
  1043. case PM_POST_SUSPEND:
  1044. case PM_POST_HIBERNATION:
  1045. cpu_hotplug_enable();
  1046. break;
  1047. default:
  1048. return NOTIFY_DONE;
  1049. }
  1050. return NOTIFY_OK;
  1051. }
  1052. static int __init cpu_hotplug_pm_sync_init(void)
  1053. {
  1054. /*
  1055. * cpu_hotplug_pm_callback has higher priority than x86
  1056. * bsp_pm_callback which depends on cpu_hotplug_pm_callback
  1057. * to disable cpu hotplug to avoid cpu hotplug race.
  1058. */
  1059. pm_notifier(cpu_hotplug_pm_callback, 0);
  1060. return 0;
  1061. }
  1062. core_initcall(cpu_hotplug_pm_sync_init);
  1063. #endif /* CONFIG_PM_SLEEP_SMP */
  1064. #endif /* CONFIG_SMP */
  1065. /* Boot processor state steps */
  1066. static struct cpuhp_step cpuhp_bp_states[] = {
  1067. [CPUHP_OFFLINE] = {
  1068. .name = "offline",
  1069. .startup.single = NULL,
  1070. .teardown.single = NULL,
  1071. },
  1072. #ifdef CONFIG_SMP
  1073. [CPUHP_CREATE_THREADS] = {
  1074. .name = "threads:prepare",
  1075. .startup.single = smpboot_create_threads,
  1076. .teardown.single = NULL,
  1077. .cant_stop = true,
  1078. },
  1079. [CPUHP_PERF_PREPARE] = {
  1080. .name = "perf:prepare",
  1081. .startup.single = perf_event_init_cpu,
  1082. .teardown.single = perf_event_exit_cpu,
  1083. },
  1084. [CPUHP_WORKQUEUE_PREP] = {
  1085. .name = "workqueue:prepare",
  1086. .startup.single = workqueue_prepare_cpu,
  1087. .teardown.single = NULL,
  1088. },
  1089. [CPUHP_HRTIMERS_PREPARE] = {
  1090. .name = "hrtimers:prepare",
  1091. .startup.single = hrtimers_prepare_cpu,
  1092. .teardown.single = hrtimers_dead_cpu,
  1093. },
  1094. [CPUHP_SMPCFD_PREPARE] = {
  1095. .name = "smpcfd:prepare",
  1096. .startup.single = smpcfd_prepare_cpu,
  1097. .teardown.single = smpcfd_dead_cpu,
  1098. },
  1099. [CPUHP_RCUTREE_PREP] = {
  1100. .name = "RCU/tree:prepare",
  1101. .startup.single = rcutree_prepare_cpu,
  1102. .teardown.single = rcutree_dead_cpu,
  1103. },
  1104. /*
  1105. * Preparatory and dead notifiers. Will be replaced once the notifiers
  1106. * are converted to states.
  1107. */
  1108. [CPUHP_NOTIFY_PREPARE] = {
  1109. .name = "notify:prepare",
  1110. .startup.single = notify_prepare,
  1111. .teardown.single = notify_dead,
  1112. .skip_onerr = true,
  1113. .cant_stop = true,
  1114. },
  1115. /*
  1116. * On the tear-down path, timers_dead_cpu() must be invoked
  1117. * before blk_mq_queue_reinit_notify() from notify_dead(),
  1118. * otherwise an RCU stall occurs.
  1119. */
  1120. [CPUHP_TIMERS_DEAD] = {
  1121. .name = "timers:dead",
  1122. .startup.single = NULL,
  1123. .teardown.single = timers_dead_cpu,
  1124. },
  1125. /* Kicks the plugged cpu into life */
  1126. [CPUHP_BRINGUP_CPU] = {
  1127. .name = "cpu:bringup",
  1128. .startup.single = bringup_cpu,
  1129. .teardown.single = NULL,
  1130. .cant_stop = true,
  1131. },
  1132. [CPUHP_AP_SMPCFD_DYING] = {
  1133. .name = "smpcfd:dying",
  1134. .startup.single = NULL,
  1135. .teardown.single = smpcfd_dying_cpu,
  1136. },
  1137. /*
  1138. * Handled on the control processor until the plugged processor manages
  1139. * this itself.
  1140. */
  1141. [CPUHP_TEARDOWN_CPU] = {
  1142. .name = "cpu:teardown",
  1143. .startup.single = NULL,
  1144. .teardown.single = takedown_cpu,
  1145. .cant_stop = true,
  1146. },
  1147. #else
  1148. [CPUHP_BRINGUP_CPU] = { },
  1149. #endif
  1150. };
  1151. /* Application processor state steps */
  1152. static struct cpuhp_step cpuhp_ap_states[] = {
  1153. #ifdef CONFIG_SMP
  1154. /* Final state before CPU kills itself */
  1155. [CPUHP_AP_IDLE_DEAD] = {
  1156. .name = "idle:dead",
  1157. },
  1158. /*
  1159. * Last state before CPU enters the idle loop to die. Transient state
  1160. * for synchronization.
  1161. */
  1162. [CPUHP_AP_OFFLINE] = {
  1163. .name = "ap:offline",
  1164. .cant_stop = true,
  1165. },
  1166. /* First state is scheduler control. Interrupts are disabled */
  1167. [CPUHP_AP_SCHED_STARTING] = {
  1168. .name = "sched:starting",
  1169. .startup.single = sched_cpu_starting,
  1170. .teardown.single = sched_cpu_dying,
  1171. },
  1172. [CPUHP_AP_RCUTREE_DYING] = {
  1173. .name = "RCU/tree:dying",
  1174. .startup.single = NULL,
  1175. .teardown.single = rcutree_dying_cpu,
  1176. },
  1177. /* Entry state on starting. Interrupts enabled from here on. Transient
  1178. * state for synchronization */
  1179. [CPUHP_AP_ONLINE] = {
  1180. .name = "ap:online",
  1181. },
  1182. /* Handle smpboot threads park/unpark */
  1183. [CPUHP_AP_SMPBOOT_THREADS] = {
  1184. .name = "smpboot/threads:online",
  1185. .startup.single = smpboot_unpark_threads,
  1186. .teardown.single = NULL,
  1187. },
  1188. [CPUHP_AP_PERF_ONLINE] = {
  1189. .name = "perf:online",
  1190. .startup.single = perf_event_init_cpu,
  1191. .teardown.single = perf_event_exit_cpu,
  1192. },
  1193. [CPUHP_AP_WORKQUEUE_ONLINE] = {
  1194. .name = "workqueue:online",
  1195. .startup.single = workqueue_online_cpu,
  1196. .teardown.single = workqueue_offline_cpu,
  1197. },
  1198. [CPUHP_AP_RCUTREE_ONLINE] = {
  1199. .name = "RCU/tree:online",
  1200. .startup.single = rcutree_online_cpu,
  1201. .teardown.single = rcutree_offline_cpu,
  1202. },
  1203. /*
  1204. * Online/down_prepare notifiers. Will be removed once the notifiers
  1205. * are converted to states.
  1206. */
  1207. [CPUHP_AP_NOTIFY_ONLINE] = {
  1208. .name = "notify:online",
  1209. .startup.single = notify_online,
  1210. .teardown.single = notify_down_prepare,
  1211. .skip_onerr = true,
  1212. },
  1213. #endif
  1214. /*
  1215. * The dynamically registered state space is here
  1216. */
  1217. #ifdef CONFIG_SMP
  1218. /* Last state is scheduler control setting the cpu active */
  1219. [CPUHP_AP_ACTIVE] = {
  1220. .name = "sched:active",
  1221. .startup.single = sched_cpu_activate,
  1222. .teardown.single = sched_cpu_deactivate,
  1223. },
  1224. #endif
  1225. /* CPU is fully up and running. */
  1226. [CPUHP_ONLINE] = {
  1227. .name = "online",
  1228. .startup.single = NULL,
  1229. .teardown.single = NULL,
  1230. },
  1231. };
  1232. /* Sanity check for callbacks */
  1233. static int cpuhp_cb_check(enum cpuhp_state state)
  1234. {
  1235. if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
  1236. return -EINVAL;
  1237. return 0;
  1238. }
  1239. static void cpuhp_store_callbacks(enum cpuhp_state state,
  1240. const char *name,
  1241. int (*startup)(unsigned int cpu),
  1242. int (*teardown)(unsigned int cpu),
  1243. bool multi_instance)
  1244. {
  1245. /* (Un)Install the callbacks for further cpu hotplug operations */
  1246. struct cpuhp_step *sp;
  1247. mutex_lock(&cpuhp_state_mutex);
  1248. sp = cpuhp_get_step(state);
  1249. sp->startup.single = startup;
  1250. sp->teardown.single = teardown;
  1251. sp->name = name;
  1252. sp->multi_instance = multi_instance;
  1253. INIT_HLIST_HEAD(&sp->list);
  1254. mutex_unlock(&cpuhp_state_mutex);
  1255. }
  1256. static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
  1257. {
  1258. return cpuhp_get_step(state)->teardown.single;
  1259. }
  1260. /*
  1261. * Call the startup/teardown function for a step either on the AP or
  1262. * on the current CPU.
  1263. */
  1264. static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
  1265. struct hlist_node *node)
  1266. {
  1267. struct cpuhp_step *sp = cpuhp_get_step(state);
  1268. int ret;
  1269. if ((bringup && !sp->startup.single) ||
  1270. (!bringup && !sp->teardown.single))
  1271. return 0;
  1272. /*
  1273. * The non AP bound callbacks can fail on bringup. On teardown
  1274. * e.g. module removal we crash for now.
  1275. */
  1276. #ifdef CONFIG_SMP
  1277. if (cpuhp_is_ap_state(state))
  1278. ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
  1279. else
  1280. ret = cpuhp_invoke_callback(cpu, state, bringup, node);
  1281. #else
  1282. ret = cpuhp_invoke_callback(cpu, state, bringup, node);
  1283. #endif
  1284. BUG_ON(ret && !bringup);
  1285. return ret;
  1286. }
  1287. /*
  1288. * Called from __cpuhp_setup_state on a recoverable failure.
  1289. *
  1290. * Note: The teardown callbacks for rollback are not allowed to fail!
  1291. */
  1292. static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
  1293. struct hlist_node *node)
  1294. {
  1295. int cpu;
  1296. /* Roll back the already executed steps on the other cpus */
  1297. for_each_present_cpu(cpu) {
  1298. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1299. int cpustate = st->state;
  1300. if (cpu >= failedcpu)
  1301. break;
  1302. /* Did we invoke the startup call on that cpu ? */
  1303. if (cpustate >= state)
  1304. cpuhp_issue_call(cpu, state, false, node);
  1305. }
  1306. }
  1307. /*
  1308. * Returns a free slot for dynamic state assignment in the Online space. The
  1309. * states are protected by the cpuhp_state_mutex and an empty slot is
  1310. * identified by having no name assigned.
  1311. */
  1312. static int cpuhp_reserve_state(enum cpuhp_state state)
  1313. {
  1314. enum cpuhp_state i;
  1315. mutex_lock(&cpuhp_state_mutex);
  1316. for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
  1317. if (cpuhp_ap_states[i].name)
  1318. continue;
  1319. cpuhp_ap_states[i].name = "Reserved";
  1320. mutex_unlock(&cpuhp_state_mutex);
  1321. return i;
  1322. }
  1323. mutex_unlock(&cpuhp_state_mutex);
  1324. WARN(1, "No more dynamic states available for CPU hotplug\n");
  1325. return -ENOSPC;
  1326. }
  1327. int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
  1328. bool invoke)
  1329. {
  1330. struct cpuhp_step *sp;
  1331. int cpu;
  1332. int ret;
  1333. sp = cpuhp_get_step(state);
  1334. if (sp->multi_instance == false)
  1335. return -EINVAL;
  1336. get_online_cpus();
  1337. if (!invoke || !sp->startup.multi)
  1338. goto add_node;
  1339. /*
  1340. * Try to call the startup callback for each present cpu
  1341. * depending on the hotplug state of the cpu.
  1342. */
  1343. for_each_present_cpu(cpu) {
  1344. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1345. int cpustate = st->state;
  1346. if (cpustate < state)
  1347. continue;
  1348. ret = cpuhp_issue_call(cpu, state, true, node);
  1349. if (ret) {
  1350. if (sp->teardown.multi)
  1351. cpuhp_rollback_install(cpu, state, node);
  1352. goto err;
  1353. }
  1354. }
  1355. add_node:
  1356. ret = 0;
  1357. mutex_lock(&cpuhp_state_mutex);
  1358. hlist_add_head(node, &sp->list);
  1359. mutex_unlock(&cpuhp_state_mutex);
  1360. err:
  1361. put_online_cpus();
  1362. return ret;
  1363. }
  1364. EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
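/*
 * Illustrative sketch, not part of this file: the multi-instance usage this
 * helper backs, via the cpuhp_setup_state_multi()/cpuhp_state_add_instance()
 * wrappers from <linux/cpuhotplug.h>. The struct, callback and variable
 * names are hypothetical.
 *
 *	struct example_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static int example_dev_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct example_dev *ed = hlist_entry(node, struct example_dev, node);
 *
 *		pr_info("cpu %u online for device %p\n", cpu, ed);
 *		return 0;
 *	}
 *
 *	static enum cpuhp_state example_state;
 *
 *	In the driver's init path:
 *
 *		int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *						  "example/dev:online",
 *						  example_dev_online, NULL);
 *		if (ret < 0)
 *			return ret;
 *		example_state = ret;
 *
 *	and once per device instance:
 *
 *		cpuhp_state_add_instance(example_state, &ed->node);
 */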
  1365. /**
  1366. * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
  1367. * @state: The state to setup
  1368. * @invoke: If true, the startup function is invoked for cpus where
  1369. * cpu state >= @state
  1370. * @startup: startup callback function
  1371. * @teardown: teardown callback function
  1372. *
  1373. * Returns 0 (or the dynamically allocated state) on success, a negative error code otherwise
  1374. */
  1375. int __cpuhp_setup_state(enum cpuhp_state state,
  1376. const char *name, bool invoke,
  1377. int (*startup)(unsigned int cpu),
  1378. int (*teardown)(unsigned int cpu),
  1379. bool multi_instance)
  1380. {
  1381. int cpu, ret = 0;
  1382. int dyn_state = 0;
  1383. if (cpuhp_cb_check(state) || !name)
  1384. return -EINVAL;
  1385. get_online_cpus();
  1386. /* currently, dynamic state assignments are only possible for the ONLINE state */
  1387. if (state == CPUHP_AP_ONLINE_DYN) {
  1388. dyn_state = 1;
  1389. ret = cpuhp_reserve_state(state);
  1390. if (ret < 0)
  1391. goto out;
  1392. state = ret;
  1393. }
  1394. cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
  1395. if (!invoke || !startup)
  1396. goto out;
  1397. /*
  1398. * Try to call the startup callback for each present cpu
  1399. * depending on the hotplug state of the cpu.
  1400. */
  1401. for_each_present_cpu(cpu) {
  1402. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1403. int cpustate = st->state;
  1404. if (cpustate < state)
  1405. continue;
  1406. ret = cpuhp_issue_call(cpu, state, true, NULL);
  1407. if (ret) {
  1408. if (teardown)
  1409. cpuhp_rollback_install(cpu, state, NULL);
  1410. cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
  1411. goto out;
  1412. }
  1413. }
  1414. out:
  1415. put_online_cpus();
  1416. if (!ret && dyn_state)
  1417. return state;
  1418. return ret;
  1419. }
  1420. EXPORT_SYMBOL(__cpuhp_setup_state);
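/*
 * Illustrative sketch, not part of this file: registering a dynamic online
 * state through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>.
 * With CPUHP_AP_ONLINE_DYN the non-negative return value is the allocated
 * state, which is later handed to cpuhp_remove_state(). The callback names
 * are hypothetical.
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		pr_info("cpu %u reached the example state\n", cpu);
 *		return 0;
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		pr_info("cpu %u left the example state\n", cpu);
 *		return 0;
 *	}
 *
 *	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				      example_online, example_offline);
 *	if (state < 0)
 *		return state;
 *	...
 *	cpuhp_remove_state(state);
 */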
  1421. int __cpuhp_state_remove_instance(enum cpuhp_state state,
  1422. struct hlist_node *node, bool invoke)
  1423. {
  1424. struct cpuhp_step *sp = cpuhp_get_step(state);
  1425. int cpu;
  1426. BUG_ON(cpuhp_cb_check(state));
  1427. if (!sp->multi_instance)
  1428. return -EINVAL;
  1429. get_online_cpus();
  1430. if (!invoke || !cpuhp_get_teardown_cb(state))
  1431. goto remove;
  1432. /*
  1433. * Call the teardown callback for each present cpu depending
  1434. * on the hotplug state of the cpu. This function is not
  1435. * allowed to fail currently!
  1436. */
  1437. for_each_present_cpu(cpu) {
  1438. struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  1439. int cpustate = st->state;
  1440. if (cpustate >= state)
  1441. cpuhp_issue_call(cpu, state, false, node);
  1442. }
  1443. remove:
  1444. mutex_lock(&cpuhp_state_mutex);
  1445. hlist_del(node);
  1446. mutex_unlock(&cpuhp_state_mutex);
  1447. put_online_cpus();
  1448. return 0;
  1449. }
  1450. EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
  1451. /**
  1452. * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
  1453. * @state: The state to remove
  1454. * @invoke: If true, the teardown function is invoked for cpus where
  1455. * cpu state >= @state
  1456. *
  1457. * The teardown callback is currently not allowed to fail. Think
  1458. * about module removal!
  1459. */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
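
/*
 * Example (illustrative sketch, not part of the original file): tearing the
 * state down again on module exit, matching the mydrv_init() sketch above.
 * "mydrv_state" is the hypothetical variable holding the dynamically
 * allocated state number.
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		// Invokes the teardown callback on every present CPU that has
 *		// reached the state and then releases the state; the teardown
 *		// path is not allowed to fail, so nothing is returned here.
 *		__cpuhp_remove_state(mydrv_state, true);
 *	}
 */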

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	/* The device hotplug lock must be dropped on all exit paths */
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
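
/*
 * Resulting sysfs layout (illustrative; the paths follow from the two
 * attribute groups above, both named "hotplug"):
 *
 *	/sys/devices/system/cpu/hotplug/states		- list of named states
 *	/sys/devices/system/cpu/cpuN/hotplug/state	- current state of cpuN
 *	/sys/devices/system/cpu/cpuN/hotplug/target	- writable target state
 *
 * Writing a state number to "target" (only CPUHP_OFFLINE or CPUHP_ONLINE
 * unless CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled) drives the cpu up or
 * down to that state via do_cpu_up()/do_cpu_down().
 */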

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every bit number nr < NR_CPUS, the single-bit
 * value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
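
/*
 * Worked example (illustrative sketch, assuming the get_cpu_mask() helper in
 * <linux/cpumask.h> and BITS_PER_LONG == 64): cpumask_of(cpu) picks row
 * 1 + cpu % BITS_PER_LONG, whose first word holds 1UL << (cpu % BITS_PER_LONG),
 * and then steps the pointer back by cpu / BITS_PER_LONG words so that this
 * word lands at the right offset within the returned mask:
 *
 *	cpu = 66, BITS_PER_LONG = 64
 *	row = 1 + 66 % 64 = 3			// row 3 has word[0] = 1UL << 2
 *	p   = cpu_bit_bitmap[3] - 66 / 64	// back up by one word
 *
 * Word 1 of the resulting mask is then 1UL << 2, i.e. bit 66 of the full
 * cpumask, and all other words read are zero.  Backing the pointer up is
 * safe because row 0 of the table is left empty for exactly this purpose
 * (see the comment above).
 */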

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
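
/*
 * These masks form a hierarchy: cpu_active_mask is a subset of
 * cpu_online_mask, which is a subset of cpu_present_mask, which in turn
 * is a subset of cpu_possible_mask.  The init_cpu_*() helpers below are
 * used by architecture setup code to seed the possible, present and
 * online masks during early boot.
 */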

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}