/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * The Queue Management Unit (QMU) is designed to offload the SW effort
 * of servicing DMA interrupts.
 * SW prepares General Purpose Descriptors (GPD) and Buffer Descriptors (BD)
 * to link data buffers, then triggers the QMU to send data to the host or
 * receive data from it. Currently only GPDs are supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
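
/*
 * Note: a GPD is a 16-byte descriptor (see the QMU_GPD_SIZE assertion in
 * mtu3_qmu_init() below). The fields used here are @flag (HWO/BDP/BPS/IOC
 * bits), @chksum, @buf_len / @data_buf_len, @buffer and @next_gpd (low
 * 32 bits of the DMA addresses), the TX/RX ext_addr fields carrying the
 * high address bits, and @ext_flag (ZLP). GPDs are chained via @next_gpd
 * into a per-endpoint ring; struct qmu_gpd itself is defined in the
 * driver's headers.
 */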
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_IOC	BIT(7)

#define GPD_EXT_FLAG_ZLP	BIT(5)
#define GPD_EXT_NGP(x)		(((x) & 0xf) << 4)
#define GPD_EXT_BUF(x)		(((x) & 0xf) << 0)

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
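
/*
 * The queue registers hold only the low 32 bits of a GPD address; the
 * upper bits live in the corresponding TQHIAR/RQHIAR "high address"
 * registers. The helpers below recombine (when reading the current GPD
 * pointer) or split (when programming the start address) the two halves
 * so the rest of the driver can work with a plain dma_addr_t.
 */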
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}
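
/*
 * Each endpoint owns a ring of MAX_GPD_NUM GPDs taken from the GPD dma
 * pool: @start/@end delimit the ring, @enqueue is where SW prepares the
 * next GPD and @dequeue is the oldest GPD not yet completed; both wrap
 * back to @start once they pass @end. One GPD is always kept free so a
 * full ring can be distinguished from an empty one.
 */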
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}
/*
 * calculate the checksum of a GPD or BD
 * "noinline" and "mb" are added to prevent a miscalculated checksum
 */
static noinline u8 qmu_calc_checksum(u8 *data)
{
	u8 chksum = 0;
	int i;

	data[1] = 0x0;	/* clear the checksum field first */

	mb();	/* ensure the gpd/bd is really up-to-date */
	for (i = 0; i < QMU_CHECKSUM_LEN; i++)
		chksum += data[i];

	/* Default: HWO=1, @flag[bit0] */
	chksum += 1;

	return 0xFF - chksum;
}
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}
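
/*
 * Returns nonzero when there is no free GPD left in the endpoint's ring
 * (only the reserved GPD remains), in which case the caller must not
 * queue another request until some transfers complete.
 */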
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
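
/*
 * GPD preparation pattern shared by the TX and RX paths below: fill in
 * the GPD at the enqueue position, advance the enqueue pointer, clear
 * HWO on the newly exposed GPD and link it via @next_gpd, compute the
 * checksum, and only then hand the prepared GPD over to the hardware by
 * setting its HWO flag.
 */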
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->tx_ext_addr = cpu_to_le16(ext_addr);

	if (req->zero)
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->data_buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->rx_ext_addr = cpu_to_le16(ext_addr);
	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
		/* send a zero length packet according to the ZLP flag in the GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to the next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}
/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop the QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}
/*
 * The QMU can't transfer a zero-length packet directly (a hardware limit
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
 * a length error interrupt and send the ZLP via the BMU from the ISR.
 */
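/*
 * Rough sequence: disable TX_DMAREQEN so the endpoint is driven manually,
 * wait until the TX FIFO is no longer full, set TX_TXPKTRDY to let the
 * BMU send the ZLP, mark the current GPD with BPS|HWO so the QMU will
 * skip it, then re-enable TX_DMAREQEN and resume the queue.
 */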
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length == 0)
		req = &mreq->request;
	else
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (le16_to_cpu(gpd_current->buf_len) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->flag |= GPD_FLAGS_BPS;
	gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
	gpd_current->flag |= GPD_FLAGS_HWO;

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}
/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile the second request is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid the case above, qmu_done_tx() is called directly from the ISR.
 */
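/*
 * Complete every TX request whose GPD the hardware has handed back
 * (HWO cleared), walking the dequeue pointer up to the GPD the TX
 * queue is currently processing.
 */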
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* convert the current GPD's DMA address to a virtual address */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);
		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);
		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}

		req = &mreq->request;
		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}
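
/*
 * Both the "done" status (QISAR0) and the exception status (QISAR1) are
 * masked with their interrupt-enable registers before being dispatched;
 * the done status is written back to clear it (W1C).
 */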
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}