/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
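
/*
 * A rough sketch of the lock lifecycle these helpers implement, for
 * illustration only (the steps below are performed by
 * generic_exec_single() and flush_smp_call_function_queue(), not by
 * callers directly):
 *
 *	csd_lock(csd);			// sender: wait for csd, then claim it
 *	csd->func = func;		// sender: publish work after smp_wmb()
 *	csd->info = info;
 *	llist_add(&csd->llist, q);	// sender: make csd visible to target
 *					// target: runs csd->func(csd->info)
 *	csd_unlock(csd);		// release via smp_store_release();
 *					// after func() for synchronous calls,
 *					// before func() for asynchronous ones
 */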
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, and that
	 * visibility follows from the normal cache coherency rules
	 * implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * SMP functions above are typically synchronous, so they had
	 * better run first, since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
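
/*
 * Example (an illustrative sketch, not part of this file): read a per-CPU
 * value on CPU 2 and wait for the result. read_some_percpu_counter() is a
 * hypothetical helper standing in for whatever must run on the target CPU.
 *
 *	static void read_counter(void *info)
 *	{
 *		*(u64 *)info = read_some_percpu_counter();	// hypothetical
 *	}
 *
 *	u64 val;
 *	int err = smp_call_function_single(2, read_counter, &val, 1);
 *	if (!err)
 *		pr_info("CPU2 counter: %llu\n", val);
 */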
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
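
/*
 * Example (an illustrative sketch, not part of this file): a driver embeds
 * the csd in its own object so that each device owns exactly one in-flight
 * call, which gives the serialization the comment above requires.
 * struct my_dev, my_dev_poke() and the target_cpu field are hypothetical.
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		int target_cpu;
 *	};
 *
 *	static void my_dev_poke(void *info)
 *	{
 *		// runs on dev->target_cpu, in IPI context
 *	}
 *
 *	static void my_dev_kick(struct my_dev *dev)
 *	{
 *		dev->csd.func = my_dev_poke;
 *		dev->csd.info = dev;
 *		smp_call_function_single_async(dev->target_cpu, &dev->csd);
 *	}
 */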
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
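
/*
 * Example (an illustrative sketch, not part of this file): run a probe on
 * whichever CPU of a device's affinity mask is cheapest to reach, per the
 * selection preference above (current CPU first, then the local node).
 * dev->affinity and probe_one() are hypothetical.
 *
 *	static void probe_one(void *info)
 *	{
 *		// fast, non-blocking probe of per-CPU state
 *	}
 *
 *	ret = smp_call_function_any(dev->affinity, probe_one, dev, 1);
 */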
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
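
/*
 * Example (an illustrative sketch, not part of this file): invalidate a
 * software cache on every other CPU of a NUMA node and wait for all of
 * them to finish. drop_local_cache() and nid are hypothetical; note that
 * preemption must be disabled around the call, as documented above.
 *
 *	static void drop_local_cache(void *info)
 *	{
 *		// fast, non-blocking invalidation of per-CPU state
 *	}
 *
 *	preempt_disable();
 *	smp_call_function_many(cpumask_of_node(nid), drop_local_cache,
 *			       NULL, true);
 *	preempt_enable();
 */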
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
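
/*
 * Example (an illustrative sketch, not part of this file): synchronously
 * flush a per-CPU statistics buffer everywhere, including on the local
 * CPU, which on_each_cpu() covers itself. flush_stats() is hypothetical.
 *
 *	static void flush_stats(void *info)
 *	{
 *		// drain this CPU's buffer; runs with interrupts disabled
 *	}
 *
 *	on_each_cpu(flush_stats, NULL, 1);
 */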
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
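
/*
 * Example (an illustrative sketch, not part of this file): refresh
 * per-CPU state only on CPUs whose generation counter is stale, skipping
 * the rest. The per-cpu variable gen, cpu_is_stale(), refresh_cpu() and
 * current_gen are all hypothetical.
 *
 *	static bool cpu_is_stale(int cpu, void *info)
 *	{
 *		return per_cpu(gen, cpu) != *(u64 *)info;	// hypothetical
 *	}
 *
 *	static void refresh_cpu(void *info)
 *	{
 *		// bring this CPU's state up to the current generation
 *	}
 *
 *	on_each_cpu_cond(cpu_is_stale, refresh_cpu, &current_gen,
 *			 true, GFP_KERNEL);
 */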
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including cpus that are idle-polling; cpus that are not idle are left
 * alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
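
/*
 * Example (an illustrative sketch, not part of this file): run a function
 * that may sleep on CPU 0 and collect its return value. Unlike the IPI
 * based helpers above, the callback runs from a workqueue, so it may
 * block; with phys=true the underlying vCPU is pinned for the duration
 * where the hypervisor supports it. read_platform_reg() and result are
 * hypothetical.
 *
 *	static int read_platform_reg(void *data)
 *	{
 *		// may sleep; runs in kworker context on the target CPU
 *		return 0;
 *	}
 *
 *	ret = smp_call_on_cpu(0, read_platform_reg, &result, false);
 */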