davinci_cpdma.c

/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
	/* hardware fields */
	u32	hw_next;
	u32	hw_buffer;
	u32	hw_len;
	u32	hw_mode;
	/* software fields */
	void	*sw_token;
	u32	sw_buffer;
	u32	sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t	phys;
	dma_addr_t	hw_addr;
	void __iomem	*iomap;		/* ioremap map */
	void		*cpumap;	/* dma_alloc map */
	int		desc_size, mem_size;
	int		num_desc;
	struct device	*dev;
	struct gen_pool	*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32	reg;
	u32	shift, mask;
	int	access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %d != avail %d",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev = dev;
	pool->mem_size = size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc = size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if rate limited channels are not in order
 * starting from the lowest channel number.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 1;
	int i;

	*prio_mode = 0;
	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan) {
			rlim = 0;
			continue;
		}

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			if (rlim) {
				new_rmask |= chan->mask;
			} else {
				ch->rate = old_rate;
				dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
					chan->chan_num);
				return -EINVAL;
			}
		} else {
			*prio_mode = 1;
			rlim = 0;
		}
	}

	*rmask = new_rmask;
	return 0;
}

static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 divident, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		divident = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		divident = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		divident = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);

	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* off prio mode if all tx channels are rate limited */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
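
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * driver): an Ethernet driver typically fills a struct cpdma_params at
 * probe time and starts the controller from its open path.  Only fields
 * this file itself references are shown; "priv", "dma_regs" and the
 * value choices are placeholders.
 *
 *	struct cpdma_params dma_params = {
 *		.dev		= dev,
 *		.dmaregs	= dma_regs,
 *		.num_chan	= CPDMA_MAX_CHANNELS,
 *		.has_soft_reset	= 1,
 *		.desc_mem_size	= desc_mem_size,
 *		.desc_align	= 16,
 *	};
 *
 *	priv->dma = cpdma_ctlr_create(&dma_params);
 *	if (!priv->dma)
 *		return -ENOMEM;
 *	...
 *	ret = cpdma_ctlr_start(priv->dma);	(typically from ndo_open)
 */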

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* use remains */
	most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i, tx_num = 0;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
			tx_num++;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
	rx_desc_num = pool->num_desc - tx_desc_num;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}

/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for Rx channels
 * and 100% for Tx channels. The weight is used to split cpdma resources
 * between the channels in the proportion they require, including the
 * number of descriptors. The channel rate alone is not enough to derive
 * the weight, as the maximum rate of the interface would also be needed.
 * If weight = 0, the channel uses whatever descriptors are left over by
 * the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
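
/*
 * Illustrative sketch (hypothetical caller, not taken from this driver):
 * with two TX channels, a caller could reserve 70% of the TX descriptors
 * for a high-priority channel and let the other take whatever is left:
 *
 *	cpdma_chan_set_weight(tx_hi_chan, 70);
 *	cpdma_chan_set_weight(tx_lo_chan, 0);	(weight 0 = share the rest)
 *
 * The pool is re-split on every call, so weights may be changed at runtime.
 */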

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int divident, divisor;

	divident = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(divident, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);

/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; 0 turns the shaper off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* enable shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
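
/*
 * Illustrative sketch (hypothetical caller, not taken from this driver):
 * shapers are configured per TX channel, lowest channel number first, and
 * a requested rate is best clamped against cpdma_chan_get_min_rate()
 * before being applied:
 *
 *	u32 min_rate = cpdma_chan_get_min_rate(ctlr);
 *
 *	if (rate_kbps && rate_kbps < min_rate)
 *		rate_kbps = min_rate;
 *	ret = cpdma_chan_set_rate(tx_chan, rate_kbps);	(0 disables the shaper)
 */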

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;
	chan->rate = 0;
	chan->desc_num = ctlr->pool->num_desc / 2;
	chan->weight = 0;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}

	chan->mask = BIT(chan_linear(chan));
	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
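
/*
 * Illustrative sketch (hypothetical caller, not taken from this driver):
 * the same submit call queues TX packets and replenishes RX buffers; the
 * token is handed back to the channel handler on completion.
 *
 *	TX: ret = cpdma_chan_submit(tx_chan, skb, skb->data, skb->len, 0);
 *	RX: ret = cpdma_chan_submit(rx_chan, skb, skb->data, buf_len, 0);
 *
 * A -ENOMEM return means the per-channel descriptor budget (or the pool)
 * is exhausted and the caller should back off and retry later.
 */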

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
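
/*
 * Illustrative sketch (hypothetical NAPI poll, not taken from this driver):
 * completed descriptors invoke the channel handler from inside
 * cpdma_chan_process(), so a poll loop usually only bounds the work with
 * its budget and re-arms the interrupt with an EOI write.  CPDMA_EOI_RX is
 * assumed to come from davinci_cpdma.h.
 *
 *	num_done = cpdma_chan_process(rx_chan, budget);
 *	if (num_done < budget) {
 *		napi_complete(napi);
 *		cpdma_ctlr_eoi(ctlr, CPDMA_EOI_RX);
 *	}
 */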

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

	spin_lock_irqsave(&chan->lock, flags);
	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");