/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
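
/*
 * Illustrative sketch (not part of this driver): a hypothetical HIF-level
 * caller might use the per-transfer context mechanism described above
 * roughly as follows. The pipe pointer, transfer id, contexts and DMA
 * mappings are assumed to come from the surrounding HIF code; only
 * functions defined in this file are used.
 *
 *	ret = ath10k_ce_send(pipe, skb, paddr, len, transfer_id, 0);
 *	if (ret)
 *		return ret;
 *
 *	// Later, on the send-done path, the same context is echoed back:
 *	while (ath10k_ce_completed_send_next(pipe, &ctx) == 0)
 *		consume_sent_buffer(ctx);	// ctx is the skb passed above
 *
 *	// On the receive side, buffers are posted with a context and
 *	// reaped together with the number of bytes actually received:
 *	ath10k_ce_rx_post_buf(pipe, rx_ctx, rx_paddr);
 *	while (ath10k_ce_completed_recv_next(pipe, &ctx, &nbytes) == 0)
 *		deliver_rx(ctx, nbytes);
 *
 * consume_sent_buffer() and deliver_rx() are placeholders for whatever
 * the caller does with the echoed contexts.
 */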
static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
					struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 0:
		addr = 0x00032000;
		break;
	case 3:
		addr = 0x0003200C;
		break;
	case 4:
		addr = 0x00032010;
		break;
	case 5:
		addr = 0x00032014;
		break;
	case 7:
		addr = 0x0003201C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}
	return addr;
}

static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
					 struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 1:
		addr = 0x00032034;
		break;
	case 2:
		addr = 0x00032038;
		break;
	case 5:
		addr = 0x00032044;
		break;
	case 7:
		addr = 0x0003204C;
		break;
	case 8:
		addr = 0x00032050;
		break;
	case 9:
		addr = 0x00032054;
		break;
	case 10:
		addr = 0x00032058;
		break;
	case 11:
		addr = 0x0003205C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}

	return addr;
}

static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset << addr_map->lsb) & addr_map->mask);
}

static inline unsigned int
ath10k_get_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset & addr_map->mask) >> (addr_map->lsb));
}

static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
							 u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_srri_addr);

	return index;
}

static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
					  struct ath10k_ce_pipe *ce_state,
					  unsigned int value)
{
	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}

static inline void
ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
					   struct ath10k_ce_pipe *ce_state,
					   unsigned int value)
{
	ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline
u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
		CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_drri_addr);

	return index;
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr | misc_regs->err_mask);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				  void *per_transfer_context,
				  dma_addr_t buffer,
				  unsigned int nbytes,
				  unsigned int transfer_id,
				  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER)) {
		if (ar->hw_params.shadow_reg_support)
			ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
								  write_index);
		else
			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
							   write_index);
	}

	src_ring->write_index = write_index;
exit:
	return ret;
}
static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
				     void *per_transfer_context,
				     dma_addr_t buffer,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (ar->hw_params.rri_on_ddr)
		sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
	else
		sw_index = src_ring->sw_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;

	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	addr = (__le32 *)&sdesc.addr;

	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
	addr[0] = __cpu_to_le32(buffer);
	addr[1] = __cpu_to_le32(flags);
	if (flags & CE_SEND_FLAG_GATHER)
		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
	else
		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));

	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
					     buffer, nbytes, transfer_id, flags);
}
EXPORT_SYMBOL(ath10k_ce_send_nolock);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
EXPORT_SYMBOL(__ath10k_ce_send_revert);

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_send);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}
EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);

static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);

	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent the CE ring from getting stuck when the ring is full by
	 * keeping the write index at least 1 less than the read index.
	 */
	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
				      void **per_transfer_contextp,
				      unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_ctx,
					 unsigned int *nbytesp)
{
	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_ctx,
							    nbytesp);
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_contextp,
							    nbytesp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next);

static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
				       void **per_transfer_contextp,
				       dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}
EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);

static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
					u32 sw_index,
					dma_addr_t *bufferp,
					u32 *nbytesp,
					u32 *transfer_idp)
{
	struct ce_desc *base = src_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}

static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
					   struct ath10k_ce_ring *src_ring,
					   u32 sw_index,
					   dma_addr_t *bufferp,
					   u32 *nbytesp,
					   u32 *transfer_idp)
{
	struct ce_desc_64 *base = src_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_SRC_RING_TO_DESC_64(base, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le64_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
						    bufferp, nbytesp,
						    transfer_idp);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_cancel_send_next);

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next);
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = ath10k_ce_interrupt_summary(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
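
/*
 * Illustrative sketch (not part of this driver): a bus-level handler that
 * shares one IRQ across all CEs could drive the helpers above roughly as
 * shown below. Real bus glue typically defers the service call to a
 * tasklet or NAPI context; the example is simplified and the function
 * name is hypothetical.
 *
 *	static irqreturn_t example_ce_irq_handler(int irq, void *arg)
 *	{
 *		struct ath10k *ar = arg;
 *
 *		ath10k_ce_disable_interrupts(ar);
 *		ath10k_ce_per_engine_service_any(ar);
 *		ath10k_ce_enable_interrupts(ar);
 *
 *		return IRQ_HANDLED;
 *	}
 */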
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	/* Skip the last copy engine, CE7 the diagnostic window, as that
	 * uses polling and isn't initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	if (ar->hw_params.target_64bit)
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	if (ar->hw_params.target_64bit)
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}
static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
				       struct ath10k_ce_ring *src_ring,
				       u32 nentries)
{
	src_ring->shadow_base_unaligned = kcalloc(nentries,
						  sizeof(struct ce_desc),
						  GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned)
		return -ENOMEM;

	src_ring->shadow_base = (struct ce_desc *)
			PTR_ALIGN(src_ring->shadow_base_unaligned,
				  CE_DESC_RING_ALIGN);
	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
			    const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (!src_ring)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			/* Free with the same size as the 64-bit descriptor
			 * allocation above.
			 */
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc_64) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_zalloc_coherent(ar->dev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space =
			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
			ALIGN(dest_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return dest_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
                             const struct ce_attr *attr)
{
        struct ath10k_ce_ring *dest_ring;
        u32 nentries;
        dma_addr_t base_addr;

        nentries = roundup_pow_of_two(attr->dest_nentries);

        dest_ring = kzalloc(sizeof(*dest_ring) +
                            (nentries *
                             sizeof(*dest_ring->per_transfer_context)),
                            GFP_KERNEL);
        if (!dest_ring)
                return ERR_PTR(-ENOMEM);

        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;

        /* Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
                dma_alloc_coherent(ar->dev,
                                   (nentries * sizeof(struct ce_desc_64) +
                                    CE_DESC_RING_ALIGN),
                                   &base_addr, GFP_KERNEL);
        if (!dest_ring->base_addr_owner_space_unaligned) {
                kfree(dest_ring);
                return ERR_PTR(-ENOMEM);
        }

        dest_ring->base_addr_ce_space_unaligned = base_addr;

        /* Explicitly initialize the descriptor memory to 0 to prevent
         * garbage data from crashing the system during firmware download
         */
        memset(dest_ring->base_addr_owner_space_unaligned, 0,
               nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);

        dest_ring->base_addr_owner_space =
                PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
                          CE_DESC_RING_ALIGN);
        dest_ring->base_addr_ce_space =
                ALIGN(dest_ring->base_addr_ce_space_unaligned,
                      CE_DESC_RING_ALIGN);

        return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings, or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware. (A usage sketch follows the
 * function below.)
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
                        const struct ce_attr *attr)
{
        int ret;

        if (attr->src_nentries) {
                ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        return ret;
                }
        }

        if (attr->dest_nentries) {
                ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(ath10k_ce_init_pipe);
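
/*
 * Hypothetical usage sketch, not part of the driver: ath10k_ce_init_pipe()
 * programs the Copy Engine hardware for rings whose memory is managed
 * separately by ath10k_ce_alloc_pipe()/ath10k_ce_free_pipe(), so a bus
 * layer could conceivably re-run the init step for every pipe after a
 * power cycle roughly like this. The function name and the "attrs" table
 * are made up for illustration.
 */
static int example_reinit_all_pipes(struct ath10k *ar,
                                    const struct ce_attr *attrs)
{
        int i, ret;

        for (i = 0; i < CE_COUNT; i++) {
                ret = ath10k_ce_init_pipe(ar, i, &attrs[i]);
                if (ret) {
                        /* Undo whatever was already programmed. */
                        while (i--)
                                ath10k_ce_deinit_pipe(ar, i);
                        return ret;
                }
        }

        return 0;
}
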
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
        u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
        u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
        ath10k_ce_deinit_src_ring(ar, ce_id);
        ath10k_ce_deinit_dest_ring(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_deinit_pipe);

static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

        if (ce_state->src_ring) {
                if (ar->hw_params.shadow_reg_support)
                        kfree(ce_state->src_ring->shadow_base_unaligned);
                dma_free_coherent(ar->dev,
                                  (ce_state->src_ring->nentries *
                                   sizeof(struct ce_desc) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->src_ring->base_addr_owner_space,
                                  ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }

        if (ce_state->dest_ring) {
                dma_free_coherent(ar->dev,
                                  (ce_state->dest_ring->nentries *
                                   sizeof(struct ce_desc) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->dest_ring->base_addr_owner_space,
                                  ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }

        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;
}

static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

        if (ce_state->src_ring) {
                if (ar->hw_params.shadow_reg_support)
                        kfree(ce_state->src_ring->shadow_base_unaligned);
                dma_free_coherent(ar->dev,
                                  (ce_state->src_ring->nentries *
                                   sizeof(struct ce_desc_64) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->src_ring->base_addr_owner_space,
                                  ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }

        if (ce_state->dest_ring) {
                dma_free_coherent(ar->dev,
                                  (ce_state->dest_ring->nentries *
                                   sizeof(struct ce_desc_64) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->dest_ring->base_addr_owner_space,
                                  ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }

        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

        ce_state->ops->ce_free_pipe(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_free_pipe);

void ath10k_ce_dump_registers(struct ath10k *ar,
                              struct ath10k_fw_crash_data *crash_data)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_crash_data ce_data;
        u32 addr, id;

        lockdep_assert_held(&ar->data_lock);

        ath10k_err(ar, "Copy Engine register dump:\n");

        spin_lock_bh(&ce->ce_lock);
        for (id = 0; id < CE_COUNT; id++) {
                addr = ath10k_ce_base_address(ar, id);
                ce_data.base_addr = cpu_to_le32(addr);

                ce_data.src_wr_idx =
                        cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
                ce_data.src_r_idx =
                        cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
                ce_data.dst_wr_idx =
                        cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
                ce_data.dst_r_idx =
                        cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

                if (crash_data)
                        crash_data->ce_crash_data[id] = ce_data;

                ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
                           le32_to_cpu(ce_data.base_addr),
                           le32_to_cpu(ce_data.src_wr_idx),
                           le32_to_cpu(ce_data.src_r_idx),
                           le32_to_cpu(ce_data.dst_wr_idx),
                           le32_to_cpu(ce_data.dst_r_idx));
        }

        spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_dump_registers);
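
/*
 * Hypothetical usage sketch, not part of the driver:
 * ath10k_ce_dump_registers() asserts that ar->data_lock is held, so a
 * firmware-crash handler would wrap the call roughly like this. crash_data
 * may be NULL when only the log output is wanted; the function name here is
 * made up for illustration.
 */
static void example_dump_ce_state(struct ath10k *ar,
                                  struct ath10k_fw_crash_data *crash_data)
{
        spin_lock_bh(&ar->data_lock);
        ath10k_ce_dump_registers(ar, crash_data);
        spin_unlock_bh(&ar->data_lock);
}
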
static const struct ath10k_ce_ops ce_ops = {
        .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
        .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
        .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
        .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
        .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
        .ce_extract_desc_data = ath10k_ce_extract_desc_data,
        .ce_free_pipe = _ath10k_ce_free_pipe,
        .ce_send_nolock = _ath10k_ce_send_nolock,
};

static const struct ath10k_ce_ops ce_64_ops = {
        .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
        .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
        .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
        .ce_completed_recv_next_nolock =
                _ath10k_ce_completed_recv_next_nolock_64,
        .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
        .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
        .ce_free_pipe = _ath10k_ce_free_pipe_64,
        .ce_send_nolock = _ath10k_ce_send_nolock_64,
};

static void ath10k_ce_set_ops(struct ath10k *ar,
                              struct ath10k_ce_pipe *ce_state)
{
        switch (ar->hw_rev) {
        case ATH10K_HW_WCN3990:
                ce_state->ops = &ce_64_ops;
                break;
        default:
                ce_state->ops = &ce_ops;
                break;
        }
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
                         const struct ce_attr *attr)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
        int ret;

        ath10k_ce_set_ops(ar, ce_state);
        /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
         * additional TX locking checks.
         *
         * For the lack of a better place do the check here.
         */
        BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
        BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
        BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

        ce_state->ar = ar;
        ce_state->id = ce_id;
        ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
        ce_state->attr_flags = attr->flags;
        ce_state->src_sz_max = attr->src_sz_max;

        if (attr->src_nentries)
                ce_state->send_cb = attr->send_cb;

        if (attr->dest_nentries)
                ce_state->recv_cb = attr->recv_cb;

        if (attr->src_nentries) {
                ce_state->src_ring =
                        ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
                if (IS_ERR(ce_state->src_ring)) {
                        ret = PTR_ERR(ce_state->src_ring);
                        ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
                                   ce_id, ret);
                        ce_state->src_ring = NULL;
                        return ret;
                }
        }

        if (attr->dest_nentries) {
                ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
                                                                       ce_id,
                                                                       attr);
                if (IS_ERR(ce_state->dest_ring)) {
                        ret = PTR_ERR(ce_state->dest_ring);
                        ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
                                   ce_id, ret);
                        ce_state->dest_ring = NULL;
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
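
/*
 * Hypothetical usage sketch, not part of the driver: a minimal per-pipe
 * attribute and the alloc/free pairing for a single Copy Engine. The field
 * values are illustrative only; the real per-pipe attributes live in the
 * bus layer's host CE configuration table, and the function name is made up.
 */
static int example_alloc_one_pipe(struct ath10k *ar, int ce_id)
{
        const struct ce_attr attr = {
                .flags = 0,             /* illustrative */
                .src_nentries = 16,     /* rounded up to a power of two by the allocator */
                .src_sz_max = 2048,
                .dest_nentries = 16,
                /* .send_cb / .recv_cb are optional completion callbacks */
        };
        int ret;

        ret = ath10k_ce_alloc_pipe(ar, ce_id, &attr);
        if (ret)
                return ret;

        /* ...program the hardware with ath10k_ce_init_pipe(), use the pipe... */

        ath10k_ce_free_pipe(ar, ce_id);

        return 0;
}
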
void ath10k_ce_alloc_rri(struct ath10k *ar)
{
        int i;
        u32 value;
        u32 ctrl1_regs;
        u32 ce_base_addr;
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        ce->vaddr_rri = dma_alloc_coherent(ar->dev,
                                           (CE_COUNT * sizeof(u32)),
                                           &ce->paddr_rri, GFP_KERNEL);

        if (!ce->vaddr_rri)
                return;

        ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
                          lower_32_bits(ce->paddr_rri));
        ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
                          (upper_32_bits(ce->paddr_rri) &
                           CE_DESC_FLAGS_GET_MASK));

        for (i = 0; i < CE_COUNT; i++) {
                ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
                ce_base_addr = ath10k_ce_base_address(ar, i);
                value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
                value |= ar->hw_ce_regs->upd->mask;
                ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
        }

        memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);

void ath10k_ce_free_rri(struct ath10k *ar)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
                          ce->vaddr_rri,
                          ce->paddr_rri);
}
EXPORT_SYMBOL(ath10k_ce_free_rri);