/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);
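
/* Host software's Copy Engine configuration. */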
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}
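
/* Disable the legacy interrupt at the SoC and clear any pending firmware/CE
 * causes, flushing the posted writes with a read-back.
 */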
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
	 * interrupt from irq vector is triggered in all cases for FW
	 * indication/errors */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
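
/* Force the target out of sleep, bump the keep-awake refcount and poll the
 * RTC state until the SoC reports it is awake or the wake timeout expires.
 */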
int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
			     flags);
	if (ret)
		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
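
/* On a firmware crash, read the register dump area out of target memory,
 * log its contents and schedule a device restart.
 */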
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
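
/* Allocate per-pipe completion structures (one per source and destination
 * ring entry) and put them on each pipe's free list.
 */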
static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries)
			completions += attr->src_nentries;

		if (attr->dest_nentries)
			completions += attr->dest_nentries;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_cleanup_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
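
/* Register the send/receive completion callbacks with the Copy Engine layer
 * for every pipe except the diagnostic CE.
 */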
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}
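
/* Drain the shared completion queue and hand each entry to the HIF tx/rx
 * callbacks, returning each completion to its pipe's free list afterwards.
 */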
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
					    compl->pipe_info->pipe_num, ret);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
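
/* Allocate, DMA-map and enqueue num receive buffers on the given pipe. On
 * failure the pipe's RX buffers are cleaned up.
 */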
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
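
/* Bring the HIF layer up: switch from the early IRQ to the regular per-CE
 * interrupt handling, allocate completion state and post the initial RX
 * buffers.
 */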
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_alloc_compl(ar);
	if (ret) {
		ath10k_warn("failed to allocate CE completions: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irq: %d\n", ret);
		goto err_free_compl;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);
	ath10k_pci_process_ce(ar);
err_free_compl:
	ath10k_pci_cleanup_ce(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layer to free
		 * the buffer
		 */
		if (!netbuf) {
			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
				    ce_hdl->id);
			continue;
		}

		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}
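
/* Tear the HIF layer down: quiesce interrupts and tasklets, process any
 * leftover completions, release buffers and reset the device so it can no
 * longer DMA into host memory.
 */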
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_device_reset(ar);

	ar_pci->started = 0;
}
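
/* Synchronously exchange a BMI request/response with the target over the BMI
 * Copy Engine pipes, bouncing the caller's buffers through DMA-mapped copies.
 */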
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
  1374. static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
  1375. struct ath10k_ce_pipe *rx_pipe,
  1376. struct bmi_xfer *xfer)
  1377. {
  1378. unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
  1379. while (time_before_eq(jiffies, timeout)) {
  1380. ath10k_pci_bmi_send_done(tx_pipe);
  1381. ath10k_pci_bmi_recv_data(rx_pipe);
  1382. if (completion_done(&xfer->done))
  1383. return 0;
  1384. schedule();
  1385. }
  1386. return -ETIMEDOUT;
  1387. }
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	/* out = UL = host -> target, in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VO,	PIPEDIR_OUT,	3 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VO,	PIPEDIR_IN,	2 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BK,	PIPEDIR_OUT,	3 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BK,	PIPEDIR_IN,	2 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BE,	PIPEDIR_OUT,	3 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BE,	PIPEDIR_IN,	2 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VI,	PIPEDIR_OUT,	3 },
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VI,	PIPEDIR_IN,	2 },
	{ ATH10K_HTC_SVC_ID_WMI_CONTROL,	PIPEDIR_OUT,	3 },
	{ ATH10K_HTC_SVC_ID_WMI_CONTROL,	PIPEDIR_IN,	2 },
	{ ATH10K_HTC_SVC_ID_RSVD_CTRL,		PIPEDIR_OUT,	0 }, /* could be moved to 3 (share with WMI) */
	{ ATH10K_HTC_SVC_ID_RSVD_CTRL,		PIPEDIR_IN,	1 },
	{ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	PIPEDIR_OUT,	0 }, /* not currently used */
	{ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	PIPEDIR_IN,	1 }, /* not currently used */
	{ ATH10K_HTC_SVC_ID_HTT_DATA_MSG,	PIPEDIR_OUT,	4 },
	{ ATH10K_HTC_SVC_ID_HTT_DATA_MSG,	PIPEDIR_IN,	1 },

	/* (Additions here) */

	{ 0, 0, 0 }, /* Must be last */
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					      CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					       CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}
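
/* Download the host's CE pipe configuration and the service-to-pipe map to
 * the target via the diagnostic window, clear the PCIe L1 enable flag in the
 * target's pcie_state, set up early allocation of the first IRAM bank and
 * finally tell the target that early configuration is complete. */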
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
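
/* Allocate and initialise one copy engine per pipe according to
 * host_ce_config_wlan. The last CE is reserved for the diagnostic window,
 * so it gets no receive buffer size assigned. */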
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t)(attr->src_sz_max);
	}

	return 0;
}
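
/* Handle a firmware interrupt: acknowledge a pending firmware event and, if
 * the HIF layer is already running, dump the firmware crash area. Events
 * that arrive before start-up are only logged. */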
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
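
/* Power-up sequence for the HIF layer: reset the target, set up the copy
 * engines and interrupts, wait for the firmware to signal that it has
 * initialised and then push the host configuration to it. */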
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_device_reset(ar);
	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_device_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_device_reset(ar);
	ath10k_pci_ce_deinit(ar);

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
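
/* PCI power management hooks: on suspend the device is put into D3hot by
 * writing the power state bits of the PM control register; on resume it is
 * brought back to D0 and the RETRY_TIMEOUT workaround is re-applied. */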
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head = ath10k_pci_hif_send_head,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
};
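
/* Interrupt handling. Depending on how many MSI vectors were granted, the
 * device is serviced either through per-CE MSI vectors, a single shared MSI
 * vector or the legacy INTx line; in every case the actual work is deferred
 * to tasklets. */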
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR, MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}

static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}
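
/* Pick an interrupt mode. Unless restricted by ath10k_pci_irq_mode, MSI-X is
 * tried first, then a single MSI vector and finally the shared legacy INTx
 * line, for which the interrupt enable register is programmed directly. */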
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
		if (ret == 0)
			return 0;
		if (ret > 0)
			pci_disable_msi(ar_pci->pdev);

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}
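
/* Poll the firmware indicator register for up to roughly three seconds,
 * waiting for the target to report that it has finished initialising. In
 * legacy interrupt mode the interrupt enable write is repeated on every
 * iteration to work around the CORE_BASE race described above. */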
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("target stalled\n");
		ret = -EIO;
		goto out;
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}
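
/* Cold-reset the target by toggling the SOC global reset bit and polling the
 * RTC state register until the reset is seen to assert and then deassert. */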
static int ath10k_pci_device_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n",
			   ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
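
/* PCI probe: allocate the driver state, enable and map the device, restrict
 * DMA to 32 bits, read the chip id and register with the ath10k core. The
 * error paths unwind each step in reverse order. */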
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);