/*
 * arch/sparc/kernel/sun4d_irq.c:
 *		SS1000/SC2000 interrupt handling.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Heavily based on arch/sparc/kernel/irq.c.
 */
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>

#include "irq.h"
/* If you trust current SCSI layer to handle different SCSI IRQs,
 * enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */

struct sun4d_timer_regs *sun4d_timers;
#define TIMER_IRQ	10

#define MAX_STATIC_ALLOC	4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
unsigned char cpu_leds[32];
#ifdef CONFIG_SMP
static unsigned char sbus_tid[32];
#endif

static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;

static struct sbus_action {
	struct irqaction *action;
	/* For SMP this needs to be extended */
} *sbus_actions;
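
/*
 * pil_to_sbus[] maps a processor interrupt level (PIL 0-15) to the
 * SBUS interrupt level (1-7) wired to it; a zero entry means that PIL
 * is not an SBUS level.  sbus_to_pil[] is the inverse mapping.
 */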
static int pil_to_sbus[] = {
	0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};

static int sbus_to_pil[] = {
	0, 2, 3, 5, 7, 9, 11, 13,
};

static int nsbi;
#ifdef CONFIG_SMP
DEFINE_SPINLOCK(sun4d_imsk_lock);
#endif
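
/*
 * Show the handlers attached to one IRQ together with its dispatch
 * counts (per CPU on SMP), one /proc/interrupts style line via the
 * seq_file interface.
 */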
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j = 0, k = 0, sbusl;
	struct irqaction *action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int x;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		sbusl = pil_to_sbus[i];
		if (!sbusl) {
			action = *(i + irq_action);
			if (!action)
				goto out_unlock;
		} else {
			for (j = 0; j < nsbi; j++) {
				for (k = 0; k < 4; k++)
					if ((action = sbus_actions[(j << 5) + (sbusl << 2) + k].action))
						goto found_it;
			}
			goto out_unlock;
		}
found_it:	seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(x)
			seq_printf(p, "%10u ",
				   kstat_cpu(cpu_logical_map(x)).irqs[i]);
#endif
		seq_printf(p, "%c %s",
			   (action->flags & IRQF_DISABLED) ? '+' : ' ',
			   action->name);
		action = action->next;
		for (;;) {
			for (; action; action = action->next) {
				seq_printf(p, ",%s %s",
					   (action->flags & IRQF_DISABLED) ? " +" : "",
					   action->name);
			}
			if (!sbusl)
				break;
			k++;
			if (k < 4)
				action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
			else {
				j++;
				if (j == nsbi)
					break;
				k = 0;
				action = sbus_actions[(j << 5) + (sbusl << 2)].action;
			}
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}
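
/*
 * Detach the irqaction matching dev_id (or the sole non-shared one)
 * from its PIL or SBUS-slot list, free it unless it was statically
 * allocated, and disable the IRQ once no handlers remain.
 */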
void sun4d_free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **actionp;
	struct irqaction *tmp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);
	if (irq < 15)
		actionp = irq + irq_action;
	else
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	action = *actionp;
	if (!action) {
		printk("Trying to free free IRQ%d\n", irq);
		goto out_unlock;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n", irq);
			goto out_unlock;
		}
	} else if (action->flags & IRQF_SHARED) {
		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
		goto out_unlock;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		/* This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk("Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		goto out_unlock;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	kfree(action);

	if (!(*actionp))
		__disable_irq(irq);

out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
}

extern void unexpected_irq(int, void *, struct pt_regs *);
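
/*
 * Top-level dispatcher, entered from the trap code with the PIL in
 * 'irq'.  Plain cpu-level interrupts run the handler chain for that
 * PIL directly.  For SBUS levels we read the pending-SBI mask for the
 * level, then for each pending SBI acquire its per-slot pending bits,
 * run the handler chain of every pending slot and release the slot.
 */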
void sun4d_handler_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct irqaction *action;
	int cpu = smp_processor_id();
	/* SBUS IRQ level (1 - 7) */
	int sbusl = pil_to_sbus[irq];

	/* FIXME: Is this necessary?? */
	cc_get_ipen();

	cc_set_iclr(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	if (!sbusl) {
		action = *(irq + irq_action);
		if (!action)
			unexpected_irq(irq, NULL, regs);
		do {
			action->handler(irq, action->dev_id);
			action = action->next;
		} while (action);
	} else {
		int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
		int sbino;
		struct sbus_action *actionp;
		unsigned mask, slot;
		int sbil = (sbusl << 2);

		bw_clear_intr_mask(sbusl, bus_mask);

		/* Loop for each pending SBI */
		for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
			if (bus_mask & 1) {
				mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
				mask &= (0xf << sbil);
				actionp = sbus_actions + (sbino << 5) + (sbil);
				/* Loop for each pending SBI slot */
				for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
					if (mask & slot) {
						mask &= ~slot;
						action = actionp->action;

						if (!action)
							unexpected_irq(irq, NULL, regs);
						do {
							action->handler(irq, action->dev_id);
							action = action->next;
						} while (action);
						release_sbi(SBI2DEVID(sbino), slot);
					}
			}
	}
	irq_exit();
	set_irq_regs(old_regs);
}
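
/*
 * Attach a handler.  IRQ numbers 0-14 are cpu-level (PIL) interrupts;
 * numbers from 32 up address one SBUS slot, encoded as
 * ((sbi + 1) << 5) + (sbus_level << 2) + slot, so values 15-31 are
 * rejected.  A typical call (handler and cookie names are purely
 * illustrative) might look like:
 *
 *	err = sun4d_request_irq(irq, my_handler, IRQF_SHARED,
 *				"mydev", my_cookie);
 */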
int sun4d_request_irq(unsigned int irq,
		      irq_handler_t handler,
		      unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action, *tmp = NULL, **actionp;
	unsigned long flags;
	int ret;

	if (irq > 14 && irq < (1 << 5)) {
		ret = -EINVAL;
		goto out;
	}

	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (irq >= (1 << 5))
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	else
		actionp = irq + irq_action;
	action = *actionp;

	if (action) {
		if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*actionp = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
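
/*
 * Enable/disable an SBUS IRQ by clearing/setting the corresponding PIL
 * bit in the interrupt mask of the CPU that services the owning SBI
 * (sbus_tid[]).  Plain cpu-level IRQs (below NR_IRQS) are left alone.
 */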
static void sun4d_disable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}

static void sun4d_enable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}
#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
	sun4d_send_ipi(cpu, level);
}

static void sun4d_clear_ipi(int cpu, int level)
{
}

static void sun4d_set_udt(int cpu)
{
}

/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
	struct device_node *dp;

#ifdef DISTRIBUTE_IRQS
	cpumask_t sbus_serving_map;

	sbus_serving_map = cpu_present_map;
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);

		if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else if (cpu_isset(board * 2, cpu_present_map))
			sbus_tid[board] = (board * 2);
		else if (cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else
			sbus_tid[board] = 0xff;
		if (sbus_tid[board] != 0xff)
			cpu_clear(sbus_tid[board], sbus_serving_map);
	}
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);

		if (sbus_tid[board] == 0xff) {
			int i = 31;

			if (cpus_empty(sbus_serving_map))
				sbus_serving_map = cpu_present_map;
			/* Pick the highest-numbered CPU still in the serving map. */
			while (!cpu_isset(i, sbus_serving_map))
				i--;
			sbus_tid[board] = i;
			cpu_clear(i, sbus_serving_map);
		}
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);

		printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
		set_sbi_tid(devid, sbus_tid[board] << 3);
	}
#else
	int cpuid = cpu_logical_map(1);

	if (cpuid == -1)
		cpuid = cpu_logical_map(0);
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);

		sbus_tid[board] = cpuid;
		set_sbi_tid(devid, cpuid << 3);
	}
	printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
#endif
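
/*
 * The level-10 clock interrupt is acknowledged by reading the timer
 * limit register; the value read is discarded.
 */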
static void sun4d_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;

	clear_intr = sun4d_timers->l10_timer_limit;
}

static void sun4d_clear_profile_irq(int cpu)
{
	bw_get_prof_limit(cpu);
}

static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
	bw_set_prof_limit(cpu, limit);
}
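
/*
 * Map the boot CPU's timer registers, program the level-10 limit for
 * one tick per 1/HZ seconds, install counter_fn as the level-10
 * handler, zero the profile timer limit on every CPU, and on SMP
 * redirect the boot CPU's level-14 trap slot at the smp4d ticker.
 */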
static void __init sun4d_init_timers(irq_handler_t counter_fn)
{
	int irq;
	int cpu;
	struct resource r;
	int mid;

	/* Map the User Timer registers. */
	memset(&r, 0, sizeof(r));
#ifdef CONFIG_SMP
	r.start = CSR_BASE(boot_cpu_id) + BW_TIMER_LIMIT;
#else
	r.start = CSR_BASE(0) + BW_TIMER_LIMIT;
#endif
	r.flags = 0xf;
	sun4d_timers = (struct sun4d_timer_regs *) of_ioremap(&r, 0,
							      PAGE_SIZE, "user timer");

	sun4d_timers->l10_timer_limit = (((1000000 / HZ) + 1) << 10);
	master_l10_counter = &sun4d_timers->l10_cur_count;
	master_l10_limit = &sun4d_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (IRQF_DISABLED | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n", TIMER_IRQ);
		prom_halt();
	}

	/* Enable user timer free run for CPU 0 in BW */
	/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */

	cpu = 0;
	while (!cpu_find_by_instance(cpu, NULL, &mid)) {
		sun4d_load_profile_irq(mid >> 3, 0);
		cpu++;
	}

#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
		extern unsigned int real_irq_entry[], smp4d_ticker[];
		extern unsigned int patchme_maybe_smp_msg[];

		/* Adjust so that we jump directly to smp4d_ticker */
		lvl14_save[2] += smp4d_ticker - real_irq_entry;

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmware's level 14 vector into the boot cpu's
		 * trap table, we must fix this now or we get squashed.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		local_irq_restore(flags);
	}
#endif
}
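
/*
 * Count the SBI nodes in the device tree, allocate one sbus_action
 * entry per (SBI, level, slot) combination, point every SBI at the
 * boot CPU and drain any interrupts the PROM left pending.
 */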
void __init sun4d_init_sbi_irq(void)
{
	struct device_node *dp;

	nsbi = 0;
	for_each_node_by_name(dp, "sbi")
		nsbi++;
	sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
	if (!sbus_actions) {
		prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
		prom_halt();
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);
		unsigned int mask;

#ifdef CONFIG_SMP
		{
			extern unsigned char boot_cpu_id;

			set_sbi_tid(devid, boot_cpu_id << 3);
			sbus_tid[board] = boot_cpu_id;
		}
#endif
		/* Get rid of pending irqs from PROM */
		mask = acquire_sbi(devid, 0xffffffff);
		if (mask) {
			printk("Clearing pending IRQs %08x on SBI %d\n", mask, board);
			release_sbi(devid, mask);
		}
	}
}
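
/*
 * Hook the sun4d implementations into the platform irq vectors
 * (BTFIXUP) and select sun4d_init_timers.  Called with interrupts
 * disabled; they stay off until the OBP ticker has been disabled.
 */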
void __init sun4d_init_IRQ(void)
{
	local_irq_disable();

	BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}