xgene-dma.c

/*
 * Applied Micro X-Gene SoC DMA engine Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
 *          Loc Ho <lho@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * NOTE: PM support is currently not available.
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include "dmaengine.h"

/* X-Gene DMA ring csr registers and bit definitions */
#define XGENE_DMA_RING_CONFIG 0x04
#define XGENE_DMA_RING_ENABLE BIT(31)
#define XGENE_DMA_RING_ID 0x08
#define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
#define XGENE_DMA_RING_ID_BUF 0x0C
#define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
#define XGENE_DMA_RING_THRESLD0_SET1 0x30
#define XGENE_DMA_RING_THRESLD0_SET1_VAL 0X64
#define XGENE_DMA_RING_THRESLD1_SET1 0x34
#define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
#define XGENE_DMA_RING_HYSTERESIS 0x68
#define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
#define XGENE_DMA_RING_STATE 0x6C
#define XGENE_DMA_RING_STATE_WR_BASE 0x70
#define XGENE_DMA_RING_NE_INT_MODE 0x017C
#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
	((m) &= (~BIT(31 - (v))))
#define XGENE_DMA_RING_CLKEN 0xC208
#define XGENE_DMA_RING_SRST 0xC200
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET 0x2C
#define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
#define XGENE_DMA_RING_COHERENT_SET(m) \
	(((u32 *)(m))[2] |= BIT(4))
#define XGENE_DMA_RING_ADDRL_SET(m, v) \
	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
#define XGENE_DMA_RING_ADDRH_SET(m, v) \
	(((u32 *)(m))[3] |= ((v) >> 35))
#define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
	(((u32 *)(m))[3] |= BIT(19))
#define XGENE_DMA_RING_SIZE_SET(m, v) \
	(((u32 *)(m))[3] |= ((v) << 23))
#define XGENE_DMA_RING_RECOMBBUF_SET(m) \
	(((u32 *)(m))[3] |= BIT(27))
#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
	(((u32 *)(m))[3] |= (0x7 << 28))
#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
	(((u32 *)(m))[4] |= 0x3)
#define XGENE_DMA_RING_SELTHRSH_SET(m) \
	(((u32 *)(m))[4] |= BIT(3))
#define XGENE_DMA_RING_TYPE_SET(m, v) \
	(((u32 *)(m))[4] |= ((v) << 19))

/* X-Gene DMA device csr registers and bit definitions */
#define XGENE_DMA_IPBRR 0x0
#define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
#define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
#define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
#define XGENE_DMA_GCR 0x10
#define XGENE_DMA_CH_SETUP(v) \
	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
#define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
#define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
#define XGENE_DMA_RAID6_CONT 0x14
#define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
#define XGENE_DMA_INT 0x70
#define XGENE_DMA_INT_MASK 0x74
#define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
#define XGENE_DMA_INT_ALL_UNMASK 0x0
#define XGENE_DMA_INT_MASK_SHIFT 0x14
#define XGENE_DMA_RING_INT0_MASK 0x90A0
#define XGENE_DMA_RING_INT1_MASK 0x90A8
#define XGENE_DMA_RING_INT2_MASK 0x90B0
#define XGENE_DMA_RING_INT3_MASK 0x90B8
#define XGENE_DMA_RING_INT4_MASK 0x90C0
#define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
#define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_BLK_MEM_RDY 0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000

/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW 0x18
#define XGENE_DMA_PQ_DISABLE_MASK BIT(13)

/* X-Gene DMA Descriptor format */
#define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
#define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
#define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
#define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
#define XGENE_DMA_DESC_ELERR_POS 46
#define XGENE_DMA_DESC_RTYPE_POS 56
#define XGENE_DMA_DESC_LERR_POS 60
#define XGENE_DMA_DESC_BUFLEN_POS 48
#define XGENE_DMA_DESC_HOENQ_NUM_POS 48
#define XGENE_DMA_DESC_ELERR_RD(m) \
	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
#define XGENE_DMA_DESC_LERR_RD(m) \
	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
#define XGENE_DMA_DESC_STATUS(elerr, lerr) \
	(((elerr) << 4) | (lerr))

/* X-Gene DMA descriptor empty s/w signature */
#define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL

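/*
 * A hw descriptor is four 64-bit little-endian words (m0-m3); see
 * struct xgene_dma_desc_hw below. Rx ring slots are pre-filled with the
 * empty signature above and rewritten with it once a completion has been
 * processed, so a slot still holding the signature carries nothing new.
 */
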
/* X-Gene DMA configurable parameters defines */
#define XGENE_DMA_RING_NUM 512
#define XGENE_DMA_BUFNUM 0x0
#define XGENE_DMA_CPU_BUFNUM 0x18
#define XGENE_DMA_RING_OWNER_DMA 0x03
#define XGENE_DMA_RING_OWNER_CPU 0x0F
#define XGENE_DMA_RING_TYPE_REGULAR 0x01
#define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
#define XGENE_DMA_RING_NUM_CONFIG 5
#define XGENE_DMA_MAX_CHANNEL 4
#define XGENE_DMA_XOR_CHANNEL 0
#define XGENE_DMA_PQ_CHANNEL 1
#define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
#define XGENE_DMA_MAX_XOR_SRC 5
#define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
#define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL

/* X-Gene DMA descriptor error codes */
#define ERR_DESC_AXI 0x01
#define ERR_BAD_DESC 0x02
#define ERR_READ_DATA_AXI 0x03
#define ERR_WRITE_DATA_AXI 0x04
#define ERR_FBP_TIMEOUT 0x05
#define ERR_ECC 0x06
#define ERR_DIFF_SIZE 0x08
#define ERR_SCT_GAT_LEN 0x09
#define ERR_CRC_ERR 0x11
#define ERR_CHKSUM 0x12
#define ERR_DIF 0x13

/* X-Gene DMA error interrupt codes */
#define ERR_DIF_SIZE_INT 0x0
#define ERR_GS_ERR_INT 0x1
#define ERR_FPB_TIMEO_INT 0x2
#define ERR_WFIFO_OVF_INT 0x3
#define ERR_RFIFO_OVF_INT 0x4
#define ERR_WR_TIMEO_INT 0x5
#define ERR_RD_TIMEO_INT 0x6
#define ERR_WR_ERR_INT 0x7
#define ERR_RD_ERR_INT 0x8
#define ERR_BAD_DESC_INT 0x9
#define ERR_DESC_DST_INT 0xA
#define ERR_DESC_SRC_INT 0xB

/* X-Gene DMA flyby operation code */
#define FLYBY_2SRC_XOR 0x80
#define FLYBY_3SRC_XOR 0x90
#define FLYBY_4SRC_XOR 0xA0
#define FLYBY_5SRC_XOR 0xB0

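/*
 * The flyby opcode selects an XOR across 2-5 source buffers; it is looked
 * up from the source count in xgene_dma_encode_xor_flyby() below.
 */
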
/* X-Gene DMA SW descriptor flags */
#define XGENE_DMA_FLAG_64B_DESC BIT(0)

/* Define to dump X-Gene DMA descriptor */
#define XGENE_DMA_DESC_DUMP(desc, m) \
	print_hex_dump(KERN_ERR, (m), \
		       DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)

#define to_dma_desc_sw(tx) \
	container_of(tx, struct xgene_dma_desc_sw, tx)
#define to_dma_chan(dchan) \
	container_of(dchan, struct xgene_dma_chan, dma_chan)

#define chan_dbg(chan, fmt, arg...) \
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

struct xgene_dma_desc_hw {
	__le64 m0;
	__le64 m1;
	__le64 m2;
	__le64 m3;
};

enum xgene_dma_ring_cfgsize {
	XGENE_DMA_RING_CFG_SIZE_512B,
	XGENE_DMA_RING_CFG_SIZE_2KB,
	XGENE_DMA_RING_CFG_SIZE_16KB,
	XGENE_DMA_RING_CFG_SIZE_64KB,
	XGENE_DMA_RING_CFG_SIZE_512KB,
	XGENE_DMA_RING_CFG_SIZE_INVALID
};

struct xgene_dma_ring {
	struct xgene_dma *pdma;
	u8 buf_num;
	u16 id;
	u16 num;
	u16 head;
	u16 owner;
	u16 slots;
	u16 dst_ring_num;
	u32 size;
	void __iomem *cmd;
	void __iomem *cmd_base;
	dma_addr_t desc_paddr;
	u32 state[XGENE_DMA_RING_NUM_CONFIG];
	enum xgene_dma_ring_cfgsize cfgsize;
	union {
		void *desc_vaddr;
		struct xgene_dma_desc_hw *desc_hw;
	};
};

struct xgene_dma_desc_sw {
	struct xgene_dma_desc_hw desc1;
	struct xgene_dma_desc_hw desc2;
	u32 flags;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor tx;
};

/**
 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 * @dma_chan: dmaengine channel object member
 * @pdma: X-Gene DMA device structure reference
 * @dev: struct device reference for dma mapping api
 * @id: raw id of this channel
 * @rx_irq: channel IRQ
 * @name: name of X-Gene DMA channel
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still waiting for completion
 * @max_outstanding: max number of outstanding requests we can push to channel
 * @ld_pending: descriptors which are queued to run, but have not yet been
 *	submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
 * @ld_completed: descriptors which have finished execution by the hardware.
 *	These descriptors have already had their cleanup actions run. They
 *	are waiting for the ACK bit to be set by the async tx API.
 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for further execution
 * @rx_ring: receive ring descriptor that we use to get completed DMA
 *	descriptors during cleanup time
 */
struct xgene_dma_chan {
	struct dma_chan dma_chan;
	struct xgene_dma *pdma;
	struct device *dev;
	int id;
	int rx_irq;
	char name[10];
	spinlock_t lock;
	int pending;
	int max_outstanding;
	struct list_head ld_pending;
	struct list_head ld_running;
	struct list_head ld_completed;
	struct dma_pool *desc_pool;
	struct tasklet_struct tasklet;
	struct xgene_dma_ring tx_ring;
	struct xgene_dma_ring rx_ring;
};

/**
 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: struct device reference
 * @clk: DMA clock reference
 * @err_irq: DMA error irq number
 * @ring_num: start id number for DMA ring
 * @csr_dma: base for DMA register access
 * @csr_ring: base for DMA ring register access
 * @csr_ring_cmd: base for DMA ring command register access
 * @csr_efuse: base for efuse register access
 * @dma_dev: embedded struct dma_device
 * @chan: reference to X-Gene DMA channels
 */
struct xgene_dma {
	struct device *dev;
	struct clk *clk;
	int err_irq;
	int ring_num;
	void __iomem *csr_dma;
	void __iomem *csr_ring;
	void __iomem *csr_ring_cmd;
	void __iomem *csr_efuse;
	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
};

static const char * const xgene_dma_desc_err[] = {
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
};

static const char * const xgene_dma_err[] = {
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
};

static bool is_pq_enabled(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
}

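/*
 * Encode a buffer length into the descriptor BUFLEN field. Lengths below
 * 16 KB are stored directly; a full 16 KB segment uses the dedicated 16 KB
 * length code (callers never pass more than 16 KB per segment, see
 * xgene_dma_set_src_buffer()).
 */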
static u64 xgene_dma_encode_len(size_t len)
{
	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
		((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
		XGENE_DMA_16K_BUFFER_LEN_CODE;
}

static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
{
	static u8 flyby_type[] = {
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR,
		FLYBY_3SRC_XOR,
		FLYBY_4SRC_XOR,
		FLYBY_5SRC_XOR
	};

	return flyby_type[src_cnt];
}

static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
				     dma_addr_t *paddr)
{
	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
			*len : XGENE_DMA_MAX_BYTE_CNT;

	*ext8 |= cpu_to_le64(*paddr);
	*ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
	*len -= nbytes;
	*paddr += nbytes;
}

static void xgene_dma_invalidate_buffer(__le64 *ext8)
{
	*ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
}

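/*
 * Map an extended source slot index (0-3) of a 64B descriptor to the
 * corresponding 64-bit word of the second 32B descriptor. Note the
 * ordering expected by the hw: slots map to m1, m0, m3, m2 rather than
 * m0-m3 in sequence.
 */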
static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
	switch (idx) {
	case 0:
		return &desc->m1;
	case 1:
		return &desc->m0;
	case 2:
		return &desc->m3;
	case 3:
		return &desc->m2;
	default:
		pr_err("Invalid dma descriptor index\n");
	}

	return NULL;
}

static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
				u16 dst_ring_num)
{
	desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
	desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
				XGENE_DMA_DESC_RTYPE_POS);
	desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
	desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
				XGENE_DMA_DESC_HOENQ_NUM_POS);
}

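/*
 * Build a memcpy descriptor: a single 32B descriptor covers up to 16 KB;
 * anything larger sets the NV bit and spills the remaining source ranges
 * into the second 32B descriptor (64B format), for up to 80 KB per sw
 * descriptor.
 */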
static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t dst, dma_addr_t src,
				    size_t len)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	int i;

	/* Get 1st descriptor */
	desc1 = &desc_sw->desc1;
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(dst);

	/* Set 1st source address */
	xgene_dma_set_src_buffer(&desc1->m1, &len, &src);

	if (!len)
		return;

	/*
	 * The source buffer does not fit in one segment,
	 * so we need to split it and use a 2nd descriptor
	 */
	desc2 = &desc_sw->desc2;
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set 2nd to 5th source address */
	for (i = 0; i < 4 && len; i++)
		xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
					 &len, &src);

	/* Invalidate unused source address fields */
	for (; i < 4; i++)
		xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));

	/* Update flag to note that we have prepared a 64B descriptor */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}

static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t *dst, dma_addr_t *src,
				    u32 src_cnt, size_t *nbytes,
				    const u8 *scf)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	size_t len = *nbytes;
	int i;

	desc1 = &desc_sw->desc1;
	desc2 = &desc_sw->desc2;

	/* Initialize DMA descriptor */
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(*dst);

	/* We have multiple source addresses, so need to set NV bit */
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set flyby opcode */
	desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));

	/* Set 1st to 5th source addresses */
	for (i = 0; i < src_cnt; i++) {
		len = *nbytes;
		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
		desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
	}

	/* Update meta data */
	*nbytes = len;
	*dst += XGENE_DMA_MAX_BYTE_CNT;

	/* We always need a 64B descriptor to perform xor or pq operations */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}

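/*
 * dmaengine tx_submit hook: assign the cookie and splice the prepared
 * descriptor chain onto ld_pending under the channel lock. The actual push
 * to hw happens later, from issue_pending or the cleanup path.
 */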
static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xgene_dma_desc_sw *desc;
	struct xgene_dma_chan *chan;
	dma_cookie_t cookie;

	if (unlikely(!tx))
		return -EINVAL;

	chan = to_dma_chan(tx->chan);
	desc = to_dma_desc_sw(tx);

	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);

	return cookie;
}

static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
				       struct xgene_dma_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
				 struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc;
	dma_addr_t phys;

	desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: X-Gene DMA channel
 *
 * This function is used on all completed and acked descriptors.
 */
static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (async_tx_test_ack(&desc->tx))
			xgene_dma_clean_descriptor(chan, desc);
	}
}

/**
 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: X-Gene DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
					      struct xgene_dma_desc_sw *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->tx;

	/*
	 * If this is not the last transaction in the group,
	 * then there is no need to complete the cookie or run any
	 * callback, as this is not the tx descriptor that was handed
	 * back to the caller of this DMA request
	 */
	if (tx->cookie == 0)
		return;

	dma_cookie_complete(tx);

	/* Run the link descriptor callback function */
	if (tx->callback)
		tx->callback(tx->callback_param);

	dma_descriptor_unmap(tx);

	/* Run any dependencies */
	dma_run_dependencies(tx);
}

/**
 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: X-Gene DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api,
 * else move it to queue ld_completed.
 */
static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
					       struct xgene_dma_desc_sw *desc)
{
	/* Remove from the list of running transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

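/*
 * Copy a prepared sw descriptor into the tx ring (one or two 32B slots,
 * depending on the 64B flag), account for it in chan->pending, and write
 * the number of new descriptors to the ring command register so the hw
 * starts fetching them.
 */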
static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw)
{
	struct xgene_dma_ring *ring = &chan->tx_ring;
	struct xgene_dma_desc_hw *desc_hw;

	/* Get hw descriptor from DMA tx ring */
	desc_hw = &ring->desc_hw[ring->head];

	/*
	 * Increment the head so that it points to the next
	 * descriptor for the next submission
	 */
	if (++ring->head == ring->slots)
		ring->head = 0;

	/* Copy prepared sw descriptor data to hw descriptor */
	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));

	/*
	 * If we have prepared a 64B descriptor,
	 * we need one more hw descriptor
	 */
	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
		desc_hw = &ring->desc_hw[ring->head];

		if (++ring->head == ring->slots)
			ring->head = 0;

		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
	}

	/* Increment the pending transaction count */
	chan->pending += ((desc_sw->flags &
			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

	/* Notify the hw that we have descriptors ready for execution */
	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
		  2 : 1, ring->cmd);
}

/**
 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
 * @chan : X-Gene DMA channel
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
		return;
	}

	/*
	 * Move elements from the queue of pending transactions onto the list
	 * of running transactions and push them to hw for further execution
	 */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
		/*
		 * If we have already pushed as many transactions to the hw
		 * as it can handle, stop here; the remaining elements of the
		 * pending ld queue will be pushed once some of the
		 * descriptors we have already submitted complete
		 */
		if (chan->pending >= chan->max_outstanding)
			return;

		xgene_chan_xfer_request(chan, desc_sw);

		/*
		 * Delete this element from ld pending queue and append it to
		 * ld running queue
		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);
	}
}

/**
 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: X-Gene DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *ring = &chan->rx_ring;
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	struct xgene_dma_desc_hw *desc_hw;
	struct list_head ld_completed;
	u8 status;

	INIT_LIST_HEAD(&ld_completed);

	spin_lock_bh(&chan->lock);

	/* Clean already completed and acked descriptors */
	xgene_dma_clean_completed_descriptor(chan);

	/* Move all completed descriptors to ld completed queue, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
		/* Get subsequent hw descriptor from DMA rx ring */
		desc_hw = &ring->desc_hw[ring->head];

		/* Check if this descriptor has been completed */
		if (unlikely(le64_to_cpu(desc_hw->m0) ==
			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
			break;

		if (++ring->head == ring->slots)
			ring->head = 0;

		/* Check if we have any error with DMA transactions */
		status = XGENE_DMA_DESC_STATUS(
				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
					desc_hw->m0)),
				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
					desc_hw->m0)));
		if (status) {
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have a DMA transaction error here. Dump the DMA
			 * Tx and Rx descriptors for this request.
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
		}

		/* Notify the hw about this completed descriptor */
		iowrite32(-1, ring->cmd);

		/* Mark this hw descriptor as processed */
		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);

		/*
		 * Decrement the pending transaction count
		 * as we have processed one
		 */
		chan->pending -= ((desc_sw->flags &
				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

		/*
		 * Delete this node from ld running queue and append it to
		 * ld completed queue for further processing
		 */
		list_move_tail(&desc_sw->node, &ld_completed);
	}

	/*
	 * Start any pending transactions automatically
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	xgene_chan_xfer_ld_pending(chan);

	spin_unlock_bh(&chan->lock);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
		xgene_dma_run_tx_complete_actions(chan, desc_sw);
		xgene_dma_clean_running_descriptor(chan, desc_sw);
	}
}

static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocate descriptor pool\n");

	return 1;
}

/**
 * xgene_dma_free_desc_list - Free all descriptors in a queue
 * @chan: X-Gene DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
				     struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}

static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	chan_dbg(chan, "Free all resources\n");

	if (!chan->desc_pool)
		return;

	/* Process all running descriptors */
	xgene_dma_cleanup_descriptors(chan);

	spin_lock_bh(&chan->lock);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t copy;

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Create the largest transaction possible */
		copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Update metadata */
		len -= copy;
		dst += copy;
		src += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
	struct dma_chan *dchan, struct scatterlist *dst_sg,
	u32 dst_nents, struct scatterlist *src_sg,
	u32 src_nents, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new = NULL;
	struct xgene_dma_chan *chan;
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	if (unlikely(!dchan))
		return NULL;

	if (unlikely(!dst_nents || !src_nents))
		return NULL;

	if (unlikely(!dst_sg || !src_sg))
		return NULL;

	chan = to_dma_chan(dchan);

	/* Get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);
	dst_nents--;
	src_nents--;

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, len);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {
			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	if (!new)
		return NULL;

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
	u32 src_cnt, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
				0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare xor DMA descriptor */
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t _len = len;
	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	/*
	 * Save the source addresses in a local array, since we may have to
	 * prepare two descriptors to generate P and Q if both are enabled
	 * in the flags by the client
	 */
	memcpy(_src, src, sizeof(*src) * src_cnt);

	if (flags & DMA_PREP_PQ_DISABLE_P)
		len = 0;

	if (flags & DMA_PREP_PQ_DISABLE_Q)
		_len = 0;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/*
		 * Prepare DMA descriptor to generate P,
		 * if DMA_PREP_PQ_DISABLE_P flag is not set
		 */
		if (len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
			continue;
		}

		/*
		 * Prepare DMA descriptor to generate Q,
		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
		 */
		if (_len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
		}
	} while (len || _len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static void xgene_dma_issue_pending(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
}

static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

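/*
 * Completion handling is split between the ring ISR and this tasklet: the
 * ISR only disables the channel IRQ and schedules the tasklet, which does
 * the actual descriptor cleanup and then re-enables the IRQ.
 */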
static void xgene_dma_tasklet_cb(unsigned long data)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;

	/* Run all cleanup for descriptors which have been completed */
	xgene_dma_cleanup_descriptors(chan);

	/* Re-enable DMA channel IRQ */
	enable_irq(chan->rx_irq);
}

static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;

	BUG_ON(!chan);

	/*
	 * Disable DMA channel IRQ until we process completed
	 * descriptors
	 */
	disable_irq_nosync(chan->rx_irq);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t xgene_dma_err_isr(int irq, void *id)
{
	struct xgene_dma *pdma = (struct xgene_dma *)id;
	unsigned long int_mask;
	u32 val, i;

	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);

	/* Clear DMA interrupts */
	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);

	/* Print DMA error info */
	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
		dev_err(pdma->dev,
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);

	return IRQ_HANDLED;
}

static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
{
	int i;

	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);

	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
		iowrite32(ring->state[i], ring->pdma->csr_ring +
			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
}

static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
	xgene_dma_wr_ring_state(ring);
}

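/*
 * Program the cached ring state words into the ring csr block, then set
 * the ring id/buffer registers. For CPU-owned rx rings, also seed every
 * slot with the empty signature and enable its interrupt via the
 * NE_INT_MODE register.
 */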
  1057. static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
  1058. {
  1059. void *ring_cfg = ring->state;
  1060. u64 addr = ring->desc_paddr;
  1061. u32 i, val;
  1062. ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
  1063. /* Clear DMA ring state */
  1064. xgene_dma_clr_ring_state(ring);
  1065. /* Set DMA ring type */
  1066. XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
  1067. if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
  1068. /* Set recombination buffer and timeout */
  1069. XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
  1070. XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
  1071. XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
  1072. }
  1073. /* Initialize DMA ring state */
  1074. XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
  1075. XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
  1076. XGENE_DMA_RING_COHERENT_SET(ring_cfg);
  1077. XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
  1078. XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
  1079. XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
  1080. /* Write DMA ring configurations */
  1081. xgene_dma_wr_ring_state(ring);
  1082. /* Set DMA ring id */
  1083. iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
  1084. ring->pdma->csr_ring + XGENE_DMA_RING_ID);
  1085. /* Set DMA ring buffer */
  1086. iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
  1087. ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
  1088. if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
  1089. return;
  1090. /* Set empty signature to DMA Rx ring descriptors */
  1091. for (i = 0; i < ring->slots; i++) {
  1092. struct xgene_dma_desc_hw *desc;
  1093. desc = &ring->desc_hw[i];
  1094. desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
  1095. }
  1096. /* Enable DMA Rx ring interrupt */
  1097. val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
  1098. XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
  1099. iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
  1100. }
  1101. static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
  1102. {
  1103. u32 ring_id, val;
  1104. if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
  1105. /* Disable DMA Rx ring interrupt */
  1106. val = ioread32(ring->pdma->csr_ring +
  1107. XGENE_DMA_RING_NE_INT_MODE);
  1108. XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
  1109. iowrite32(val, ring->pdma->csr_ring +
  1110. XGENE_DMA_RING_NE_INT_MODE);
  1111. }
  1112. /* Clear DMA ring state */
  1113. ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
  1114. iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
  1115. iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
  1116. xgene_dma_clr_ring_state(ring);
  1117. }
  1118. static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
  1119. {
  1120. ring->cmd_base = ring->pdma->csr_ring_cmd +
  1121. XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
  1122. XGENE_DMA_RING_NUM));
  1123. ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
  1124. }
  1125. static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
  1126. enum xgene_dma_ring_cfgsize cfgsize)
  1127. {
  1128. int size;
  1129. switch (cfgsize) {
  1130. case XGENE_DMA_RING_CFG_SIZE_512B:
  1131. size = 0x200;
  1132. break;
  1133. case XGENE_DMA_RING_CFG_SIZE_2KB:
  1134. size = 0x800;
  1135. break;
  1136. case XGENE_DMA_RING_CFG_SIZE_16KB:
  1137. size = 0x4000;
  1138. break;
  1139. case XGENE_DMA_RING_CFG_SIZE_64KB:
  1140. size = 0x10000;
  1141. break;
  1142. case XGENE_DMA_RING_CFG_SIZE_512KB:
  1143. size = 0x80000;
  1144. break;
  1145. default:
  1146. chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
  1147. return -EINVAL;
  1148. }
  1149. return size;
  1150. }
  1151. static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
  1152. {
  1153. /* Clear DMA ring configurations */
  1154. xgene_dma_clear_ring(ring);
  1155. /* De-allocate DMA ring descriptor */
  1156. if (ring->desc_vaddr) {
  1157. dma_free_coherent(ring->pdma->dev, ring->size,
  1158. ring->desc_vaddr, ring->desc_paddr);
  1159. ring->desc_vaddr = NULL;
  1160. }
  1161. }
  1162. static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
  1163. {
  1164. xgene_dma_delete_ring_one(&chan->rx_ring);
  1165. xgene_dma_delete_ring_one(&chan->tx_ring);
  1166. }
  1167. static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
  1168. struct xgene_dma_ring *ring,
  1169. enum xgene_dma_ring_cfgsize cfgsize)
  1170. {
  1171. int ret;
  1172. /* Setup DMA ring descriptor variables */
  1173. ring->pdma = chan->pdma;
  1174. ring->cfgsize = cfgsize;
  1175. ring->num = chan->pdma->ring_num++;
  1176. ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
  1177. ret = xgene_dma_get_ring_size(chan, cfgsize);
  1178. if (ret <= 0)
  1179. return ret;
  1180. ring->size = ret;
  1181. /* Allocate memory for DMA ring descriptor */
  1182. ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
  1183. &ring->desc_paddr, GFP_KERNEL);
  1184. if (!ring->desc_vaddr) {
  1185. chan_err(chan, "Failed to allocate ring desc\n");
  1186. return -ENOMEM;
  1187. }
  1188. /* Configure and enable DMA ring */
  1189. xgene_dma_set_ring_cmd(ring);
  1190. xgene_dma_setup_ring(ring);
  1191. return 0;
  1192. }
  1193. static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
  1194. {
  1195. struct xgene_dma_ring *rx_ring = &chan->rx_ring;
  1196. struct xgene_dma_ring *tx_ring = &chan->tx_ring;
  1197. int ret;
  1198. /* Create DMA Rx ring descriptor */
  1199. rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
  1200. rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
  1201. ret = xgene_dma_create_ring_one(chan, rx_ring,
  1202. XGENE_DMA_RING_CFG_SIZE_64KB);
  1203. if (ret)
  1204. return ret;
  1205. chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
  1206. rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
  1207. /* Create DMA Tx ring descriptor */
  1208. tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
  1209. tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
  1210. ret = xgene_dma_create_ring_one(chan, tx_ring,
  1211. XGENE_DMA_RING_CFG_SIZE_64KB);
  1212. if (ret) {
  1213. xgene_dma_delete_ring_one(rx_ring);
  1214. return ret;
  1215. }
  1216. tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
  1217. chan_dbg(chan,
  1218. "Tx ring id 0x%X num %d desc 0x%p\n",
  1219. tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
  1220. /* Set the max outstanding request possible to this channel */
  1221. chan->max_outstanding = tx_ring->slots;
  1222. return ret;
  1223. }
  1224. static int xgene_dma_init_rings(struct xgene_dma *pdma)
  1225. {
  1226. int ret, i, j;
  1227. for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
  1228. ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
  1229. if (ret) {
  1230. for (j = 0; j < i; j++)
  1231. xgene_dma_delete_chan_rings(&pdma->chan[j]);
  1232. return ret;
  1233. }
  1234. }
  1235. return ret;
  1236. }
  1237. static void xgene_dma_enable(struct xgene_dma *pdma)
  1238. {
  1239. u32 val;
  1240. /* Configure and enable DMA engine */
  1241. val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
  1242. XGENE_DMA_CH_SETUP(val);
  1243. XGENE_DMA_ENABLE(val);
  1244. iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
  1245. }
  1246. static void xgene_dma_disable(struct xgene_dma *pdma)
  1247. {
  1248. u32 val;
  1249. val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
  1250. XGENE_DMA_DISABLE(val);
  1251. iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
  1252. }
  1253. static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
  1254. {
  1255. /*
  1256. * Mask DMA ring overflow, underflow and
  1257. * AXI write/read error interrupts
  1258. */
  1259. iowrite32(XGENE_DMA_INT_ALL_MASK,
  1260. pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
  1261. iowrite32(XGENE_DMA_INT_ALL_MASK,
  1262. pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
  1263. iowrite32(XGENE_DMA_INT_ALL_MASK,
  1264. pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
  1265. iowrite32(XGENE_DMA_INT_ALL_MASK,
  1266. pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
  1267. iowrite32(XGENE_DMA_INT_ALL_MASK,
  1268. pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
  1269. /* Mask DMA error interrupts */
  1270. iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
  1271. }
  1272. static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
  1273. {
  1274. /*
  1275. * Unmask DMA ring overflow, underflow and
  1276. * AXI write/read error interrupts
  1277. */
  1278. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1279. pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
  1280. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1281. pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
  1282. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1283. pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
  1284. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1285. pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
  1286. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1287. pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
  1288. /* Unmask DMA error interrupts */
  1289. iowrite32(XGENE_DMA_INT_ALL_UNMASK,
  1290. pdma->csr_dma + XGENE_DMA_INT_MASK);
  1291. }
static void xgene_dma_init_hw(struct xgene_dma *pdma)
{
	u32 val;

	/* Associate DMA ring to corresponding ring HW */
	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);

	/* Configure RAID6 polynomial control setting */
	if (is_pq_enabled(pdma))
		iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
			  pdma->csr_dma + XGENE_DMA_RAID6_CONT);
	else
		dev_info(pdma->dev, "PQ is disabled in HW\n");

	xgene_dma_enable(pdma);
	xgene_dma_unmask_interrupts(pdma);

	/* Get DMA id and version info */
	val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);

	/* DMA device info */
	dev_info(pdma->dev,
		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
		 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
		 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
}
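
/*
 * xgene_dma_init_ring_mngr - take the ring manager out of reset, bring its
 * RAM out of shutdown, program the threshold/hysteresis values and enable
 * the QPcore. Returns early if the block is already up.
 */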
static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
{
	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
	    (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
		return 0;

	iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
		!= XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release ring mngr memory from shutdown\n");
		return -ENODEV;
	}

	/* program threshold set 1 and all hysteresis */
	iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
	iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
	iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);

	/* Enable QPcore and assign error queue */
	iowrite32(XGENE_DMA_RING_ENABLE,
		  pdma->csr_ring + XGENE_DMA_RING_CONFIG);

	return 0;
}
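
/*
 * xgene_dma_init_mem - bring the ring manager and then the DMA engine RAM
 * out of shutdown, verifying the block-ready register after the settling
 * delay.
 */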
static int xgene_dma_init_mem(struct xgene_dma *pdma)
{
	int ret;

	ret = xgene_dma_init_ring_mngr(pdma);
	if (ret)
		return ret;

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
		!= XGENE_DMA_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release DMA memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
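
/*
 * xgene_dma_request_irqs - request the DMA error IRQ and one Rx ring IRQ
 * per channel, releasing everything acquired so far on failure.
 */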
static int xgene_dma_request_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int ret, i, j;

	/* Register DMA error irq */
	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
			       0, "dma_error", pdma);
	if (ret) {
		dev_err(pdma->dev,
			"Failed to register error IRQ %d\n", pdma->err_irq);
		return ret;
	}

	/* Register DMA channel rx irq */
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		ret = devm_request_irq(chan->dev, chan->rx_irq,
				       xgene_dma_chan_ring_isr,
				       0, chan->name, chan);
		if (ret) {
			chan_err(chan, "Failed to register Rx IRQ %d\n",
				 chan->rx_irq);
			devm_free_irq(pdma->dev, pdma->err_irq, pdma);

			/* Free the Rx IRQs already requested */
			for (j = 0; j < i; j++) {
				chan = &pdma->chan[j];
				devm_free_irq(chan->dev, chan->rx_irq, chan);
			}

			return ret;
		}
	}

	return 0;
}
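
/* Release the DMA error IRQ and every channel's Rx ring IRQ. */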
static void xgene_dma_free_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	/* Free DMA device error irq */
	devm_free_irq(pdma->dev, pdma->err_irq, pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		devm_free_irq(chan->dev, chan->rx_irq, chan);
	}
}
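
/*
 * xgene_dma_set_caps - populate the dmaengine capability mask, alignment
 * limits and prep callbacks for a channel according to the offloads it
 * supports (see the XOR/PQ note inside).
 */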
static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
			       struct dma_device *dma_dev)
{
	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);

	/* Set DMA device capability */
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);

	/*
	 * On the X-Gene SoC DMA engine, channel 0 supports XOR while
	 * channel 1 supports both XOR and PQ. XOR/PQ support on channel 1
	 * can be disabled in hardware, which we detect by reading the SoC
	 * efuse register. There is also a hardware erratum: running XOR
	 * and PQ requests on channel 0 and channel 1 at the same time can
	 * hang the DMA engine, so XOR is enabled on channel 0 only when
	 * XOR/PQ support on channel 1 is disabled.
	 */
	if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
	    is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	} else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
		   !is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	}

	/* Set base and prep routines */
	dma_dev->dev = chan->dev;
	dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
	dma_dev->device_issue_pending = xgene_dma_issue_pending;
	dma_dev->device_tx_status = xgene_dma_tx_status;
	dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
	dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
	}
}
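
/*
 * xgene_dma_async_register - initialize a channel's lock, descriptor lists
 * and tasklet, then register its dma_device with the dmaengine framework.
 */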
static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
{
	struct xgene_dma_chan *chan = &pdma->chan[id];
	struct dma_device *dma_dev = &pdma->dma_dev[id];
	int ret;

	chan->dma_chan.device = dma_dev;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
		     (unsigned long)chan);

	chan->pending = 0;
	chan->desc_pool = NULL;
	dma_cookie_init(&chan->dma_chan);

	/* Setup dma device capabilities and prep routines */
	xgene_dma_set_caps(chan, dma_dev);

	/* Initialize DMA device list head */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		chan_err(chan, "Failed to register async device %d", ret);
		tasklet_kill(&chan->tasklet);

		return ret;
	}

	/* DMA capability info */
	dev_info(pdma->dev,
		 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");

	return 0;
}
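
/*
 * xgene_dma_init_async - register every channel with the dmaengine core,
 * unregistering the channels already registered if a later one fails.
 *
 * Illustrative sketch only (not part of this driver): once registered,
 * a client reaches these channels through the generic dmaengine API,
 * for example:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		// prepare/submit via dmaengine_prep_dma_memcpy(),
 *		// dmaengine_submit() and dma_async_issue_pending()
 *		dma_release_channel(chan);
 *	}
 */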
static int xgene_dma_init_async(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_async_register(pdma, i);

		if (ret) {
			for (j = 0; j < i; j++) {
				dma_async_device_unregister(&pdma->dma_dev[j]);
				tasklet_kill(&pdma->chan[j].tasklet);
			}

			return ret;
		}
	}

	return ret;
}
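
/* Unregister every channel's dma_device from the dmaengine core. */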
static void xgene_dma_async_unregister(struct xgene_dma *pdma)
{
	int i;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		dma_async_device_unregister(&pdma->dma_dev[i]);
}
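
/*
 * xgene_dma_init_channels - fill in the per-channel bookkeeping (parent
 * device, id and name) before the rings and IRQs are set up.
 */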
static void xgene_dma_init_channels(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	pdma->ring_num = XGENE_DMA_RING_NUM;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		chan->dev = pdma->dev;
		chan->pdma = pdma;
		chan->id = i;
		snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
	}
}
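
/*
 * xgene_dma_get_resources - map the DMA, ring, ring-command and efuse CSR
 * regions and fetch the error and per-channel Rx IRQ numbers from the
 * platform device.
 */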
static int xgene_dma_get_resources(struct platform_device *pdev,
				   struct xgene_dma *pdma)
{
	struct resource *res;
	int irq, i;

	/* Get DMA csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get csr region\n");
		return -ENXIO;
	}

	pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!pdma->csr_dma) {
		dev_err(&pdev->dev, "Failed to ioremap csr region");
		return -ENOMEM;
	}

	/* Get DMA ring csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!pdma->csr_ring) {
		dev_err(&pdev->dev, "Failed to ioremap ring csr region");
		return -ENOMEM;
	}

	/* Get DMA ring cmd csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!pdma->csr_ring_cmd) {
		dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
		return -ENOMEM;
	}

	pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;

	/* Get efuse csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get efuse csr region\n");
		return -ENXIO;
	}

	pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (!pdma->csr_efuse) {
		dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
		return -ENOMEM;
	}

	/* Get DMA error interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Failed to get Error IRQ\n");
		return -ENXIO;
	}

	pdma->err_irq = irq;

	/* Get DMA Rx ring descriptor interrupts for all DMA channels */
	for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq <= 0) {
			dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
			return -ENXIO;
		}

		pdma->chan[i - 1].rx_irq = irq;
	}

	return 0;
}
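
/*
 * xgene_dma_probe - map resources, enable the clock if one is provided,
 * bring the hardware out of shutdown, create the rings and IRQs, and
 * finally register the channels with the dmaengine framework.
 */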
static int xgene_dma_probe(struct platform_device *pdev)
{
	struct xgene_dma *pdma;
	int ret, i;

	pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->dev = &pdev->dev;
	platform_set_drvdata(pdev, pdma);

	ret = xgene_dma_get_resources(pdev, pdma);
	if (ret)
		return ret;

	pdma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
		dev_err(&pdev->dev, "Failed to get clk\n");
		return PTR_ERR(pdma->clk);
	}

	/* Enable clk before accessing registers */
	if (!IS_ERR(pdma->clk)) {
		ret = clk_prepare_enable(pdma->clk);
		if (ret) {
			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
			return ret;
		}
	}

	/* Take DMA RAM out of shutdown */
	ret = xgene_dma_init_mem(pdma);
	if (ret)
		goto err_clk_enable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto err_dma_mask;
	}

	/* Initialize DMA channels software state */
	xgene_dma_init_channels(pdma);

	/* Configure DMA rings */
	ret = xgene_dma_init_rings(pdma);
	if (ret)
		goto err_clk_enable;

	ret = xgene_dma_request_irqs(pdma);
	if (ret)
		goto err_request_irq;

	/* Configure and enable DMA engine */
	xgene_dma_init_hw(pdma);

	/* Register DMA device with linux async framework */
	ret = xgene_dma_init_async(pdma);
	if (ret)
		goto err_async_init;

	return 0;

err_async_init:
	xgene_dma_free_irqs(pdma);

err_request_irq:
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		xgene_dma_delete_chan_rings(&pdma->chan[i]);

err_dma_mask:
err_clk_enable:
	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);

	return ret;
}
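
/*
 * xgene_dma_remove - tear down in reverse order of probe: unregister the
 * dmaengine devices, quiesce the hardware, free IRQs, delete the rings and
 * disable the clock.
 */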
static int xgene_dma_remove(struct platform_device *pdev)
{
	struct xgene_dma *pdma = platform_get_drvdata(pdev);
	struct xgene_dma_chan *chan;
	int i;

	xgene_dma_async_unregister(pdma);

	/* Mask interrupts and disable DMA engine */
	xgene_dma_mask_interrupts(pdma);
	xgene_dma_disable(pdma);
	xgene_dma_free_irqs(pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		tasklet_kill(&chan->tasklet);
		xgene_dma_delete_chan_rings(chan);
	}

	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);

	return 0;
}
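
/*
 * Device ID tables: the driver binds via ACPI (APMC0D43) or the
 * "apm,xgene-storm-dma" device tree compatible string.
 */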
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
	{"APMC0D43", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
#endif

static const struct of_device_id xgene_dma_of_match_ptr[] = {
	{.compatible = "apm,xgene-storm-dma",},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);

static struct platform_driver xgene_dma_driver = {
	.probe = xgene_dma_probe,
	.remove = xgene_dma_remove,
	.driver = {
		.name = "X-Gene-DMA",
		.of_match_table = xgene_dma_of_match_ptr,
		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
	},
};

module_platform_driver(xgene_dma_driver);

MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");