bcm2835-dma.c

/*
 * BCM2835 DMA engine support
 *
 * This driver only supports cyclic DMA transfers
 * as needed for the I2S module.
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 * Copyright 2013
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
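
/*
 * Typical dmaengine client usage against this driver, as a minimal
 * sketch: the device name "tx", the FIFO address and the buffer/period
 * variables are illustrative placeholders, not anything this file
 * defines.  Error handling is omitted for brevity.
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_MEM_TO_DEV,
 *		.dst_addr       = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */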
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

struct bcm2835_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	struct device_dma_parameters dma_parms;
};

struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};
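
/*
 * The layout above mirrors the hardware DMA control block word for word:
 * transfer information, source address, destination address, transfer
 * length, 2D stride and the bus address of the next control block,
 * padded to 32 bytes.  The BCM2835 requires control blocks to be
 * 256-bit aligned; the coherent allocation used below is more than
 * sufficiently aligned for that.
 */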
struct bcm2835_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	bool cyclic;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;

	void __iomem *chan_base;
	int irq_number;
};

struct bcm2835_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int control_block_size;
	struct bcm2835_dma_cb *control_block_base;
	dma_addr_t control_block_base_phys;

	unsigned int frames;
	size_t size;
};

#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_NEXTCB	0x1C

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)
#define BCM2835_DMA_INT		BIT(2)
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

/* Transfer information bits (go into the control block info word) */
#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_DREQ	BIT(6)
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_DREQ	BIT(10)
#define BCM2835_DMA_PER_MAP(x)	((x) << 16)

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

#define BCM2835_DMA_BULK_MASK	BIT(0)
#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
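
/*
 * Each channel's register block is spaced 0x100 bytes from the previous
 * one, e.g. BCM2835_DMA_CHANIO(base, 2) yields base + 0x200.
 */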
static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);

	dma_free_coherent(desc->vd.tx.chan->device->dev,
			desc->control_block_size,
			desc->control_block_base,
			desc->control_block_base_phys);
	kfree(desc);
}
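
/*
 * Quiesce a channel: clear ACTIVE to request a pause so any in-flight
 * AXI transfer can drain, then clear the next-CB pointer and issue
 * ABORT so the engine has nothing left to run.
 */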
static int bcm2835_dma_abort(void __iomem *chan_base)
{
	unsigned long cs;
	long int timeout = 10000;

	cs = readl(chan_base + BCM2835_DMA_CS);
	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Wait for any current AXI transfer to complete */
	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
		cpu_relax();
		cs = readl(chan_base + BCM2835_DMA_CS);
	}

	/* We'll un-pause when we set off our next DMA */
	if (!timeout)
		return -ETIMEDOUT;

	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Terminate the control block chain */
	writel(0, chan_base + BCM2835_DMA_NEXTCB);

	/* Abort the whole DMA */
	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
	       chan_base + BCM2835_DMA_CS);

	return 0;
}
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Acknowledge interrupt */
	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		/* TODO Only works for cyclic DMA */
		vchan_cyclic_callback(&d->vd);
	}

	/* Keep the DMA engine running */
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}
static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev,
			"Allocating DMA channel %d\n", c->ch);

	return request_irq(c->irq_number,
			bcm2835_dma_callback, 0, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}
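
/*
 * Residue for an in-flight descriptor: walk the control blocks in order;
 * once the block containing the current hardware address is found, count
 * the bytes left in that block plus all of the following blocks.  For
 * example, with three 4096-byte frames and the engine 1024 bytes into
 * frame 1, the residue is (4096 - 1024) + 4096 = 7168 bytes.
 */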
static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block =
			&d->control_block_base[i];
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}
static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	c->cyclic = true; /* Nothing else is implemented */

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}
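
/*
 * A cyclic transfer is built as a ring of control blocks, one per
 * period: each block moves period_len bytes between the device address
 * and one period of the buffer, raises an interrupt on completion, and
 * links to the block for the next period; the last block links back to
 * the first, so the hardware loops until the channel is torn down.
 */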
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct bcm2835_desc *d;
	dma_addr_t dev_addr;
	unsigned int es, sync_type;
	unsigned int frame;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		sync_type = BCM2835_DMA_S_DREQ;
	} else {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		sync_type = BCM2835_DMA_D_DREQ;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = BCM2835_DMA_DATA_TYPE_S32;
		break;
	default:
		return NULL;
	}

	/* Now allocate and set up the descriptor. */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->dir = direction;
	d->frames = buf_len / period_len;

	/* Allocate memory for control blocks */
	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
			d->control_block_size, &d->control_block_base_phys,
			GFP_NOWAIT);

	if (!d->control_block_base) {
		kfree(d);
		return NULL;
	}

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0; frame < d->frames; frame++) {
		struct bcm2835_dma_cb *control_block =
			&d->control_block_base[frame];

		/* Set up addresses */
		if (d->dir == DMA_DEV_TO_MEM) {
			control_block->info = BCM2835_DMA_D_INC;
			control_block->src = dev_addr;
			control_block->dst = buf_addr + frame * period_len;
		} else {
			control_block->info = BCM2835_DMA_S_INC;
			control_block->src = buf_addr + frame * period_len;
			control_block->dst = dev_addr;
		}

		/* Enable interrupt */
		control_block->info |= BCM2835_DMA_INT_EN;

		/* Set up synchronization */
		if (sync_type != 0)
			control_block->info |= sync_type;

		/* Set up DREQ channel */
		if (c->dreq != 0)
			control_block->info |=
				BCM2835_DMA_PER_MAP(c->dreq);

		/* Length of a frame */
		control_block->length = period_len;
		d->size += control_block->length;

		/*
		 * Next block is the next frame.
		 * This DMA engine driver currently only supports cyclic DMA.
		 * Therefore, wrap around at number of frames.
		 */
		control_block->next = d->control_block_base_phys +
			sizeof(struct bcm2835_dma_cb)
			* ((frame + 1) % d->frames);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
		struct dma_slave_config *cfg)
{
	if ((cfg->direction == DMA_DEV_TO_MEM &&
	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (cfg->direction == DMA_MEM_TO_DEV &&
	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    !is_slave_direction(cfg->direction)) {
		return -EINVAL;
	}

	c->cfg = *cfg;

	return 0;
}
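
/*
 * Tear down a channel: deschedule it (briefly nesting the device lock
 * inside the channel lock), abort whatever the hardware is doing, spin
 * until the ACTIVE bit drops, and hand every queued descriptor back to
 * the virt-dma layer for freeing.
 */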
static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
{
	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
	unsigned long flags;
	int timeout = 10000;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after bcm2835_dma_abort() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		bcm2835_dma_abort(c->chan_base);

		/* Wait for stopping */
		while (--timeout) {
			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
						BCM2835_DMA_ACTIVE))
				break;

			cpu_relax();
		}

		if (!timeout)
			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return bcm2835_dma_slave_config(c,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		return bcm2835_dma_terminate_all(c);

	default:
		return -ENXIO;
	}
}
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);
	INIT_LIST_HEAD(&c->node);

	d->ddev.chancnt++;

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}
static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					  struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}
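
/*
 * The single phandle argument consumed above is the peripheral DREQ
 * number.  An illustrative device-tree fragment (node names, unit
 * addresses and the mask value are examples only, not taken from a
 * real dts):
 *
 *	dma: dma@7e007000 {
 *		compatible = "brcm,bcm2835-dma";
 *		#dma-cells = <1>;
 *		brcm,dma-channel-mask = <0x7f35>;
 *	};
 *
 *	i2s@7e203000 {
 *		dmas = <&dma 2>, <&dma 3>;
 *		dma-names = "tx", "rx";
 *	};
 */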
static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;

	return 0;
}
static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i;
	int irq;
	uint32_t chans_available;

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	pdev->dev.dma_parms = &od->dma_parms;
	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_control = bcm2835_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);

	platform_set_drvdata(pdev, od);

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
			"brcm,dma-channel-mask",
			&chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/*
	 * Do not use the FIQ and BULK channels,
	 * because they are used by the GPU.
	 */
	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);

	for (i = 0; i < pdev->num_resources; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (chans_available & (1 << i)) {
			rc = bcm2835_dma_chan_init(od, i, irq);
			if (rc)
				goto err_no_dma;
		}
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
			bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}
static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL v2");