/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}
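
/*
 * Channel and descriptor memory is released elsewhere (devm allocations
 * and free_chan_resources); the only cleanup needed here is resetting
 * the dmaengine channel list.
 */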
static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else
			result.result = DMA_TRANS_ABORTED;

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}
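
/*
 * Allocate and initialize a single channel and hook it into the
 * dmaengine device's channel list. Each HIDMA instance exposes
 * exactly one channel (see hidma_probe()).
 */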
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}
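
/*
 * Tasklet body used by hidma_issue_pending() when pm_runtime_get()
 * cannot start the transfer right away: resume the device
 * synchronously and then kick the hardware.
 */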
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}
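
/*
 * Cookies are assigned in increasing order and eventually wrap.
 * Given the last successful cookie and the last used cookie, decide
 * whether a cookie the core already reports as complete actually
 * finished without an error.
 */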
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}
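
/*
 * Report descriptor status: completed-but-failed transfers are turned
 * into DMA_ERROR, and the descriptor currently running on a paused
 * channel is reported as DMA_PAUSED.
 */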
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}
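
/*
 * Preallocate the software descriptors and their hardware transfer
 * ring entries (TREs) for this channel. On failure, everything
 * allocated so far is handed back to the low-level driver.
 */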
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return 1;
}
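
/*
 * Take a descriptor off the free list, program the memcpy parameters
 * into its TRE, and park it on the prepared list until tx_submit
 * moves it to the active list.
 */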
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
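
/*
 * Abort everything queued on the channel: flush already-completed
 * descriptors, pause the hardware, give every outstanding request
 * back to its owner (callbacks are invoked with a NULL result), and
 * finally re-enable the channel.
 */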
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
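
/*
 * Undo hidma_alloc_chan_resources(): terminate anything still queued,
 * then release every descriptor and its TRE back to the low-level
 * driver.
 */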
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
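
/*
 * Pause/resume simply disable or enable the transfer channel in the
 * hardware; the paused flag keeps the operations idempotent and lets
 * hidma_tx_status() report DMA_PAUSED for the running descriptor.
 */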
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif
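
/*
 * sysfs support: a single read-only "chid" attribute that exposes the
 * hardware channel index assigned to this instance.
 */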
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}
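
/*
 * Allocate HIDMA_MSI_INTS platform MSIs and request a handler for each
 * vector. On any failure the already-requested vectors are released
 * and the caller falls back to the wired interrupt.
 */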
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}

	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}
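
/*
 * Only the newer hardware revision supports MSI: match on the
 * "qcom,hidma-1.1" compatible string (DT) or the QCOM8062 ACPI HID.
 */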
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}
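
/*
 * Probe: map the transfer and event channel register regions, bring
 * the low-level driver up, request either MSI or the wired channel
 * IRQ, and register a single memcpy-capable channel with the
 * dmaengine core.
 */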
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
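
/*
 * Tear down in reverse order of probe: unregister from the dmaengine
 * core, release the channel interrupt (wired or MSI), and shut the
 * low-level driver down before disabling runtime PM.
 */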
static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");