// SPDX-License-Identifier: GPL-2.0
/*
 * Memory arbiter functions. Allocates bandwidth through the
 * arbiter and sets up arbiter breakpoints.
 *
 * The algorithm first assigns slots to the clients that have specified
 * bandwidth (e.g. ethernet) and then the remaining slots are divided
 * among all the active clients.
 *
 * Copyright (c) 2004-2007 Axis Communications AB.
 *
 * The artpec-3 has two arbiters. The memory hierarchy looks like this:
 *
 *
 *           CPU     DMAs
 *            |        |
 *            |        |
 *        --------------    ------------------
 *        | foo arbiter|----| Internal memory|
 *        --------------    ------------------
 *              |
 *        --------------
 *        |  L2 cache  |
 *        --------------
 *              |
 *  h264 etc    |
 *     |        |
 *     |        |
 *        --------------
 *        | bar arbiter|
 *        --------------
 *              |
 *          ---------
 *          | SDRAM |
 *          ---------
 *
 */
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_foo_defs.h>
#include <hwregs/marb_bar_defs.h>
#include <arbiter.h>
#include <hwregs/intr_vect.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#define D(x)

struct crisv32_watch_entry {
	unsigned long instance;
	watch_callback *cb;
	unsigned long start;
	unsigned long end;
	int used;
};

#define NUMBER_OF_BP 4
#define SDRAM_BANDWIDTH 400000000
#define INTMEM_BANDWIDTH 400000000
#define NBR_OF_SLOTS 64
#define NBR_OF_REGIONS 2
#define NBR_OF_CLIENTS 15
#define ARBITERS 2
#define UNASSIGNED 100

struct arbiter {
	unsigned long instance;
	int nbr_regions;
	int nbr_clients;
	int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
	int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
};

static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] =
{
	{
		{regi_marb_foo_bp0},
		{regi_marb_foo_bp1},
		{regi_marb_foo_bp2},
		{regi_marb_foo_bp3}
	},
	{
		{regi_marb_bar_bp0},
		{regi_marb_bar_bp1},
		{regi_marb_bar_bp2},
		{regi_marb_bar_bp3}
	}
};

struct arbiter arbiters[ARBITERS] =
{
	{ /* L2 cache arbiter */
		.instance = regi_marb_foo,
		.nbr_regions = 2,
		.nbr_clients = 15
	},
	{ /* DDR2 arbiter */
		.instance = regi_marb_bar,
		.nbr_regions = 1,
		.nbr_clients = 9
	}
};

static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};

DEFINE_SPINLOCK(arbiter_lock);
static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id);
static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id);

/*
 * "I'm the arbiter, I know the score.
 * From square one I'll be watching all 64."
 * (memory arbiter slots, that is)
 *
 * Or in other words:
 * Program the memory arbiter slots for "region" according to what's
 * in requested_slots[] and active_clients[], while minimizing
 * latency. A caller may pass a non-zero positive amount for
 * "unused_slots", which must then be the unallocated, remaining
 * number of slots, free to hand out to any client.
 */
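/*
 * A sketch of the resulting slot vector (hypothetical client map, not
 * taken from a real configuration): with NBR_OF_SLOTS = 64, a client
 * that requested 16 slots is placed at every 4th free slot (interval
 * 64/16 = 4), a zero-request active client gets a single slot from
 * "unused_slots", and the round-robin pass at the end hands any still
 * empty slots to the active clients in client-number order.
 */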
static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
{
	int slot;
	int client;
	int interval = 0;

	/*
	 * This vector corresponds to the hardware arbiter slots (see
	 * the hardware documentation for semantics). We initialize
	 * each slot with a suitable sentinel value outside the valid
	 * range {0 .. NBR_OF_CLIENTS - 1} and replace them with
	 * client indexes. Then it's fed to the hardware.
	 */
	s8 val[NBR_OF_SLOTS];

	for (slot = 0; slot < NBR_OF_SLOTS; slot++)
		val[slot] = -1;

	for (client = 0; client < arbiters[arbiter].nbr_clients; client++) {
		int pos;

		/* Allocate the requested non-zero number of slots, but
		 * also give clients with zero-requests one slot each
		 * while stocks last. We do the latter here, in client
		 * order. This makes sure zero-request clients are the
		 * first to get to any spare slots, else those slots
		 * could, when bandwidth is allocated close to the limit,
		 * all be allocated to low-index non-zero-request clients
		 * in the default-fill loop below. Another positive but
		 * secondary effect is a somewhat better spread of the
		 * zero-bandwidth clients in the vector, avoiding some of
		 * the latency that could otherwise be caused by the
		 * partitioning of non-zero-bandwidth clients at low
		 * indexes and zero-bandwidth clients at high
		 * indexes. (Note that this spreading can only affect the
		 * unallocated bandwidth.) All the above only matters for
		 * memory-intensive situations, of course.
		 */
		if (!arbiters[arbiter].requested_slots[region][client]) {
			/*
			 * Skip inactive clients. Also skip zero-slot
			 * allocations in this pass when there are no known
			 * free slots.
			 */
			if (!arbiters[arbiter].active_clients[region][client] ||
			    unused_slots <= 0)
				continue;

			unused_slots--;

			/* Only allocate one slot for this client. */
			interval = NBR_OF_SLOTS;
		} else
			interval = NBR_OF_SLOTS /
				arbiters[arbiter].requested_slots[region][client];
		pos = 0;
		while (pos < NBR_OF_SLOTS) {
			if (val[pos] >= 0)
				pos++;
			else {
				val[pos] = client;
				pos += interval;
			}
		}
	}

	client = 0;
	for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
		/*
		 * Allocate remaining slots in round-robin
		 * client-number order for active clients. For this
		 * pass, we ignore requested bandwidth and previous
		 * allocations.
		 */
		if (val[slot] < 0) {
			int first = client;

			while (!arbiters[arbiter].active_clients[region][client]) {
				client = (client + 1) %
					arbiters[arbiter].nbr_clients;
				if (client == first)
					break;
			}
			val[slot] = client;
			client = (client + 1) % arbiters[arbiter].nbr_clients;
		}
		if (arbiter == 0) {
			if (region == EXT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_l2_slots, slot, val[slot]);
			else if (region == INT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_intm_slots, slot, val[slot]);
		} else {
			REG_WR_INT_VECT(marb_bar, regi_marb_bar,
				rw_ddr2_slots, slot, val[slot]);
		}
	}
}
extern char _stext[], _etext[];

static void crisv32_arbiter_init(void)
{
	static int initialized;

	if (initialized)
		return;

	initialized = 1;

	/*
	 * CPU caches are always set to active, but with zero
	 * bandwidth allocated. It should be ok to allocate zero
	 * bandwidth for the caches, because DMA for other channels
	 * will supposedly finish, once their programmed amount is
	 * done, and then the caches will get access according to the
	 * "fixed scheme" for unclaimed slots. Though, if for some
	 * use-case somewhere, there's a maximum CPU latency for
	 * e.g. some interrupt, we have to start allocating specific
	 * bandwidth for the CPU caches too.
	 */
	arbiters[0].active_clients[EXT_REGION][11] = 1;
	arbiters[0].active_clients[EXT_REGION][12] = 1;
	crisv32_arbiter_config(0, EXT_REGION, 0);
	crisv32_arbiter_config(0, INT_REGION, 0);
	crisv32_arbiter_config(1, EXT_REGION, 0);

	/*
	 * Pass the arbiter index as dev_id; the IRQ handlers cast
	 * dev_id back to an index into watches[], so with a NULL
	 * dev_id the bar handler would wrongly use the foo arbiter's
	 * watch entries.
	 */
	if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq,
			0, "arbiter", (void *)0))
		printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");

	if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq,
			0, "arbiter", (void *)1))
		printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");

#ifndef CONFIG_ETRAX_KGDB
	/* Global watch for writes to kernel text segment. */
	crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
		MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
		arbiter_all_write, NULL);
#endif

	/* Set up max burst sizes by default */
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3);
}
int crisv32_arbiter_allocate_bandwidth(int client, int region,
				       unsigned long bandwidth)
{
	int i;
	int total_assigned = 0;
	int total_clients = 0;
	int req;
	int arbiter = 0;

	crisv32_arbiter_init();

	if (client & 0xffff0000) {
		arbiter = 1;
		client >>= 16;
	}

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++) {
		total_assigned += arbiters[arbiter].requested_slots[region][i];
		total_clients += arbiters[arbiter].active_clients[region][i];
	}

	/* Avoid division by 0 for 0-bandwidth requests. */
	req = bandwidth == 0
		? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);
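
	/*
	 * Worked example for the computation above (hypothetical
	 * request): asking for 100000000 (100 MB/s) of the 400000000
	 * SDRAM bandwidth gives
	 * req = 64 / (400000000 / 100000000) = 16 slots.
	 */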
	/*
	 * We make sure that there are enough slots only for non-zero
	 * requests. Requesting 0 bandwidth *may* allocate slots,
	 * though if all bandwidth is allocated, such a client won't
	 * get any and will have to rely on getting memory access
	 * according to the fixed scheme that's the default when one
	 * of the slot-allocated clients doesn't claim their slot.
	 */
	if (total_assigned + req > NBR_OF_SLOTS)
		return -ENOMEM;

	arbiters[arbiter].active_clients[region][client] = 1;
	arbiters[arbiter].requested_slots[region][client] = req;
	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);

	/* Propagate allocation from foo to bar */
	if (arbiter == 0)
		crisv32_arbiter_allocate_bandwidth(8 << 16,
						   EXT_REGION, bandwidth);

	return 0;
}
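
/*
 * Usage sketch (the client number and bandwidth are made-up
 * illustration values, not a real client map): a driver that needs a
 * guaranteed 25 MB/s from SDRAM through the bar arbiter encodes its
 * client id in the upper 16 bits of "client":
 *
 *	if (crisv32_arbiter_allocate_bandwidth(3 << 16, EXT_REGION,
 *					       25000000))
 *		printk(KERN_ERR "out of arbiter slots\n");
 *
 * and releases it again with the same encoding:
 *
 *	crisv32_arbiter_deallocate_bandwidth(3 << 16, EXT_REGION);
 */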
/*
 * Main entry for bandwidth deallocation.
 *
 * Strictly speaking, for a somewhat constant set of clients where
 * each client gets a constant bandwidth and is just enabled or
 * disabled (somewhat dynamically), no action is necessary here to
 * avoid starvation for non-zero-allocation clients, as the allocated
 * slots will just be unused. However, handing out those unused slots
 * to active clients avoids needless latency if the "fixed scheme"
 * would give unclaimed slots to an eager low-index client.
 */
void crisv32_arbiter_deallocate_bandwidth(int client, int region)
{
	int i;
	int total_assigned = 0;
	int arbiter = 0;

	if (client & 0xffff0000) {
		arbiter = 1;
		/* Strip the arbiter-select bits, as in allocation. */
		client >>= 16;
	}

	arbiters[arbiter].requested_slots[region][client] = 0;
	arbiters[arbiter].active_clients[region][client] = 0;

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++)
		total_assigned += arbiters[arbiter].requested_slots[region][i];

	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
}
int crisv32_arbiter_watch(unsigned long start, unsigned long size,
			  unsigned long clients, unsigned long accesses,
			  watch_callback *cb)
{
	int i;
	int arbiter;
	int used[2] = { 0, 0 };
	int ret = 0;

	crisv32_arbiter_init();

	if (start > 0x80000000) {
		printk(KERN_ERR "Arbiter: %lX doesn't look like a "
			"physical address\n", start);
		return -EFAULT;
	}

	spin_lock(&arbiter_lock);

	if (clients & 0xffff)
		used[0] = 1;
	if (clients & 0xffff0000)
		used[1] = 1;

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		if (!used[arbiter])
			continue;

		for (i = 0; i < NUMBER_OF_BP; i++) {
			if (!watches[arbiter][i].used) {
				unsigned intr_mask;

				if (arbiter)
					intr_mask = REG_RD_INT(marb_bar,
						regi_marb_bar, rw_intr_mask);
				else
					intr_mask = REG_RD_INT(marb_foo,
						regi_marb_foo, rw_intr_mask);

				watches[arbiter][i].used = 1;
				watches[arbiter][i].start = start;
				watches[arbiter][i].end = start + size;
				watches[arbiter][i].cb = cb;

				ret |= (i + 1) << (arbiter + 8);
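
				/*
				 * The id accumulated here encodes both
				 * the breakpoint and the arbiter:
				 * bp i on arbiter a contributes
				 * (i + 1) << (a + 8), e.g. 0x100 for
				 * bp0 on the foo arbiter.
				 * crisv32_arbiter_unwatch() decodes
				 * the same fields.
				 */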
				if (arbiter) {
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_clients,
						clients & 0xffff);
				} else {
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_clients, clients >> 16);
				}

				/* Unmask the interrupt for breakpoint i. */
				intr_mask |= 1 << i;

				if (arbiter)
					REG_WR_INT(marb_bar, regi_marb_bar,
						rw_intr_mask, intr_mask);
				else
					REG_WR_INT(marb_foo, regi_marb_foo,
						rw_intr_mask, intr_mask);

				break;
			}
		}
	}
	spin_unlock(&arbiter_lock);

	if (ret)
		return ret;
	else
		return -ENOMEM;
}
int crisv32_arbiter_unwatch(int id)
{
	int arbiter;
	int intr_mask;

	crisv32_arbiter_init();

	spin_lock(&arbiter_lock);

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		int id2;

		if (arbiter)
			intr_mask = REG_RD_INT(marb_bar, regi_marb_bar,
				rw_intr_mask);
		else
			intr_mask = REG_RD_INT(marb_foo, regi_marb_foo,
				rw_intr_mask);

		id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8);
		if (id2 == 0)
			continue;
		id2--;
		if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) {
			spin_unlock(&arbiter_lock);
			return -EINVAL;
		}

		/*
		 * Clear the watch, but keep the per-breakpoint register
		 * instance, which a plain memset of the whole entry
		 * would destroy.
		 */
		watches[arbiter][id2].used = 0;
		watches[arbiter][id2].start = 0;
		watches[arbiter][id2].end = 0;
		watches[arbiter][id2].cb = NULL;

		/* Mask the interrupt for breakpoint id2 again. */
		intr_mask &= ~(1 << id2);

		if (arbiter)
			REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask,
				intr_mask);
		else
			REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask,
				intr_mask);
	}
	spin_unlock(&arbiter_lock);
	return 0;
}
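
/*
 * Usage sketch (hypothetical buffer and callback names): trap all
 * writes to a 4 KB physical buffer on both arbiters, then release the
 * breakpoints again:
 *
 *	int id = crisv32_arbiter_watch(buf_phys, 4096,
 *			MARB_CLIENTS(arbiter_all_clients,
 *				     arbiter_bar_all_clients),
 *			arbiter_all_write, my_callback);
 *	if (id > 0)
 *		crisv32_arbiter_unwatch(id);
 */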
extern void show_registers(struct pt_regs *regs);

static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_foo_r_masked_intr masked_intr;
	reg_marb_foo_bp_r_brk_clients r_clients;
	reg_marb_foo_bp_r_brk_addr r_addr;
	reg_marb_foo_bp_r_brk_op r_op;
	reg_marb_foo_bp_r_brk_first_client r_first;
	reg_marb_foo_bp_r_brk_size r_size;
	reg_marb_foo_bp_rw_ack ack = {0};
	reg_marb_foo_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr);

	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first),
		REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size));

	REG_WR(marb_foo_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr);

	printk(KERN_DEBUG "IRQ occurred at %X\n",
		(unsigned)get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}
static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_bar_r_masked_intr masked_intr;
	reg_marb_bar_bp_r_brk_clients r_clients;
	reg_marb_bar_bp_r_brk_addr r_addr;
	reg_marb_bar_bp_r_brk_op r_op;
	reg_marb_bar_bp_r_brk_first_client r_first;
	reg_marb_bar_bp_r_brk_size r_size;
	reg_marb_bar_bp_rw_ack ack = {0};
	reg_marb_bar_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr);

	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first),
		REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size));

	REG_WR(marb_bar_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr);

	printk(KERN_DEBUG "IRQ occurred at %X\n",
		(unsigned)get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}