/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
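
/*
 * On 64-bit, GPR13 always holds the pointer to the current CPU's PACA,
 * so the two accessors below can read and write the lazy interrupt
 * state (paca->irq_happened and paca->soft_enabled) with a single
 * byte load/store and no tracing side effects.
 */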
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}
/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HMI;
	if (happened & PACA_IRQ_HMI)
		return 0xe60;

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
		return 0x900;

	/* Finally check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Finally check if an EPR external interrupt happened
	 * this bit is typically set if we need to handle another
	 * "edge" interrupt from within the MPIC "EPR" handler
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#else
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL) {
		if (cpu_has_feature(CPU_FTR_HVMODE))
			return 0xe80;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left ! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
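
/*
 * Summary of the replay vectors returned above: 0xe60 hypervisor
 * maintenance, 0x900 decrementer, 0x500 external, and for doorbells
 * 0x280 on Book3E versus 0xe80 (hypervisor mode) or 0xa00 on Book3S.
 * The caller uses this vector number to re-emit the exception.
 */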
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	hard_irq_disable();

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}
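
/*
 * Illustrative sketch (not part of this file) of how a platform
 * power_save hook might use prep_irq_for_idle(); "enter_low_power"
 * is a hypothetical routine whose wakeup side effect re-enables
 * interrupts:
 *
 *	static void my_power_save(void)
 *	{
 *		if (!prep_irq_for_idle())
 *			return;			// event pending, don't sleep
 *		enter_low_power();		// may hard-enable interrupts
 *	}
 */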

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}
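
/*
 * The rows printed above (TAU, LOC, SPU, PMI, MCE, HMI, DBL) appear
 * as the architecture-specific summary lines at the bottom of
 * /proc/interrupts, after the per-IRQ counts.
 */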

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
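
/*
 * The sum above feeds the per-CPU "intr" totals reported in
 * /proc/stat, adding the architecture-specific interrupts that are
 * not visible as ordinary numbered IRQs.
 */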

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq_desc(irq, desc) {
		struct irq_data *data;
		struct irq_chip *chip;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, irq_data_get_affinity_mask(data), map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			pr_warn("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			pr_err("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	/* Briefly enable interrupts so anything already latched for
	 * this CPU can be delivered before it goes offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
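
/*
 * Because kernel stacks are THREAD_SIZE-aligned, masking the stack
 * pointer with THREAD_SIZE-1 above yields its offset from the stack
 * base, where the thread_info lives; anything below thread_info plus
 * a 2KB cushion is treated as imminent overflow.
 */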

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}
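
/*
 * call_do_irq() is an assembly helper that switches the stack
 * pointer to the hard IRQ stack described by irqtp, calls __do_irq()
 * there, and switches back, so interrupt handling never eats into
 * the interrupted task's kernel stack.
 */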

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
#ifdef CONFIG_SMP
		cpu_nr = get_hard_smp_processor_id(i);
#else
		cpu_nr = 0;
#endif
#endif

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}
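
/*
 * Note that the backing stacks for softirq_ctx/hardirq_ctx are
 * allocated elsewhere during early boot; this function only resets
 * the thread_info at the base of each stack.
 */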

void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
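
/*
 * A virq is the Linux-side interrupt number handed out by the
 * irq_domain code; the hwirq returned above is the controller-local
 * number that the hardware actually signals.
 */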

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
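
/*
 * Platform interrupt-controller code typically calls
 * irq_choose_cpu() when setting affinity, to translate a cpumask
 * into the hard (hardware) CPU id that the PIC should route the
 * interrupt to.
 */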

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
/* Booting with "noirqdistrib" on the kernel command line disables
 * spreading interrupts across CPUs; platform code checks
 * distribute_irqs when deciding where to route them.
 */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */