/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"
/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
static struct tick_device tick_broadcast_device;
/* CPUs whose local tick device must be emulated via the broadcast device */
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
/* CPUs which explicitly enabled broadcast mode (TICK_BROADCAST_ON/FORCE) */
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
/* Scratch mask; only valid while tick_broadcast_lock is held */
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
/* Set once TICK_BROADCAST_FORCE was requested; never cleared */
static int tick_broadcast_forced;

/* Protects the broadcast device, the masks above and the local clockevents */
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
/* Without oneshot support the setup path must never be reached */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif
/*
 * Debugging: see timer_list.c
 *
 * Returns a pointer to the (file static) broadcast tick device.
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}
/*
 * Debugging accessor: CPUs currently served by the broadcast device.
 */
struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}
/*
 * Start the device in periodic mode. NULL @bc is tolerated so callers
 * do not have to check whether a broadcast device exists.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}
  62. /*
  63. * Check, if the device can be utilized as broadcast device:
  64. */
  65. static bool tick_check_broadcast_device(struct clock_event_device *curdev,
  66. struct clock_event_device *newdev)
  67. {
  68. if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
  69. (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
  70. (newdev->features & CLOCK_EVT_FEAT_C3STOP))
  71. return false;
  72. if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
  73. !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
  74. return false;
  75. return !curdev || newdev->rating > curdev->rating;
  76. }
/*
 * Conditionally install/replace broadcast device
 *
 * Takes a module reference on the new device, swaps it in via
 * clockevents_exchange_device() and neutralizes the handler of the
 * replaced device. If CPUs already depend on broadcast, the new
 * device is started in periodic mode right away.
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the systems stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}
/*
 * Update the frequency of the broadcast device. Returns -ENODEV when
 * @dev is not the current broadcast device. The lock serializes
 * against the broadcast handlers reprogramming the device.
 */
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}
/*
 * Last-resort broadcast function: complains once instead of crashing
 * when no real broadcast mechanism is available.
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
/*
 * Install a broadcast function for a tick device that lacks one.
 *
 * The second NULL check is not redundant: tick_broadcast resolves to
 * NULL when the architecture provides no broadcast IPI mechanism, in
 * which case the warning fallback err_broadcast() is installed.
 */
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}
/*
 * Check, if the device is disfunctional and a place holder, which
 * needs to be handled by the broadcast device.
 *
 * Returns 1 when the caller must leave the per-cpu device shut down
 * (the broadcast device delivers the tick), 0 otherwise. Runs with
 * tick_broadcast_lock held for the whole decision.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Called from the broadcast IPI handler: run the local tick device's
 * event handler on this CPU. Returns -ENODEV/-EINVAL when there is no
 * usable local device or handler.
 */
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 *
 * Returns true when the caller must also run the local handler on the
 * current CPU (the current CPU is removed from @mask so it does not
 * IPI itself).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers on all online CPUs that are in the
 *   broadcast mask. Returns whether the local handler must run too.
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}
/*
 * Event handler for periodic broadcast ticks
 *
 * When the broadcast device itself runs in oneshot state, it is
 * re-armed one tick_period into the future to emulate periodic mode.
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fallthrough - FORCE implies ON */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hickup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			/* Last CPU left the mask: stop the broadcast device */
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			/* First CPU entered the mask: start the broadcast device */
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
  377. /*
  378. * Set the periodic handler depending on broadcast on/off
  379. */
  380. void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
  381. {
  382. if (!broadcast)
  383. dev->event_handler = tick_handle_periodic;
  384. else
  385. dev->event_handler = tick_handle_periodic_broadcast;
  386. }
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 *
 * Clears the CPU from both masks; in periodic mode the broadcast
 * device is shut down when no CPU needs it anymore.
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
/*
 * Shut the broadcast device down across a system suspend.
 */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
  416. /*
  417. * This is called from tick_resume_local() on a resuming CPU. That's
  418. * called from the core resume function, tick_unfreeze() and the magic XEN
  419. * resume hackery.
  420. *
  421. * In none of these cases the broadcast device mode can change and the
  422. * bit of the resuming CPU in the broadcast mask is safe as well.
  423. */
  424. bool tick_resume_check_broadcast(void)
  425. {
  426. if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
  427. return false;
  428. else
  429. return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
  430. }
/*
 * Resume the broadcast device after suspend and restart it in the
 * mode it was operating in, but only when some CPU still depends on it.
 */
void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#ifdef CONFIG_TICK_ONESHOT
/* CPUs that are in deep idle relying on the oneshot broadcast device */
static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
/* CPUs whose expired event will be delivered by a pending broadcast IPI */
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
/* CPUs forced to be woken by the next broadcast (event already due) */
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}
/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
/*
 * Set broadcast interrupt affinity
 *
 * Only acts on devices with CLOCK_EVT_FEAT_DYNIRQ; skips the irq call
 * when the affinity already matches.
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}
/*
 * Program the broadcast device to fire at @expires and steer its
 * interrupt towards @cpu. Switches the device to oneshot state first
 * if necessary.
 */
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}
/* Put the broadcast device back into oneshot state after resume */
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}
/*
 * Handle oneshot mode broadcasting
 *
 * Scans all CPUs in the oneshot mask: CPUs whose local event already
 * expired are collected into tmpmask and get a broadcast IPI; the
 * earliest not-yet-expired event determines when the broadcast device
 * is re-armed.
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
/*
 * Returns -EBUSY when @cpu owns an armed hrtimer-based broadcast
 * device and therefore must not go deep idle; 0 otherwise.
 */
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}
/*
 * Conditionally shut down the CPU-local tick device before deep idle.
 */
static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
/*
 * Transition a CPU into (TICK_BROADCAST_ENTER) or out of
 * (TICK_BROADCAST_EXIT) the oneshot broadcast mechanism around deep
 * idle. Returns 0 on success or -EBUSY when the CPU must not enter a
 * deep idle state (no broadcast device, periodic hrtimer broadcast,
 * or this CPU owns the hrtimer broadcast).
 */
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are not longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefor rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
/*
 * Set next_event of all per-cpu devices in @mask to @expires, so the
 * broadcast handler treats them as due at that time.
 */
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 *
 * Installs the oneshot handler exactly once. CPUs still waiting for a
 * periodic broadcast are migrated into the oneshot mask and the
 * device is programmed for the next period. Subsequent callers only
 * clear their own stale oneshot state.
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * If a dying CPU owned the hrtimer-based broadcast, re-program the
 * event from this CPU so the broadcast assignment migrates here.
 */
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
#else
/*
 * Without CONFIG_TICK_ONESHOT there is no oneshot broadcast: deep
 * idle is only allowed when a non-hrtimer broadcast device exists.
 */
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif
/*
 * Allocate the cpumasks used by the broadcast code. Called early in
 * boot, hence GFP_NOWAIT.
 */
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}