pci.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include <linux/pci.h>
  18. #include <linux/module.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/bitops.h>
  22. #include "core.h"
  23. #include "debug.h"
  24. #include "targaddrs.h"
  25. #include "bmi.h"
  26. #include "hif.h"
  27. #include "htc.h"
  28. #include "ce.h"
  29. #include "pci.h"
  30. enum ath10k_pci_irq_mode {
  31. ATH10K_PCI_IRQ_AUTO = 0,
  32. ATH10K_PCI_IRQ_LEGACY = 1,
  33. ATH10K_PCI_IRQ_MSI = 2,
  34. };
  35. enum ath10k_pci_reset_mode {
  36. ATH10K_PCI_RESET_AUTO = 0,
  37. ATH10K_PCI_RESET_WARM_ONLY = 1,
  38. };
  39. static unsigned int ath10k_pci_target_ps;
  40. static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  41. static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  42. module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
  43. MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
  44. module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  45. MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  46. module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
  47. MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
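/*
 * These knobs can be given at module load time or, being 0644, poked
 * through sysfs (typically taking effect the next time the device is
 * initialised). An illustrative invocation, assuming the standard
 * ath10k_pci module build, might be:
 *
 *   modprobe ath10k_pci irq_mode=2 reset_mode=0
 *   echo 1 > /sys/module/ath10k_pci/parameters/target_ps
 */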
  49. /* how long to wait for the target to initialise, in ms */
  49. #define ATH10K_PCI_TARGET_WAIT 3000
  50. #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  51. #define QCA988X_2_0_DEVICE_ID (0x003c)
  52. static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
  53. { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  54. {0}
  55. };
  56. static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
  57. u32 *data);
  58. static int ath10k_pci_post_rx(struct ath10k *ar);
  59. static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
  60. int num);
  61. static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
  62. static int ath10k_pci_cold_reset(struct ath10k *ar);
  63. static int ath10k_pci_warm_reset(struct ath10k *ar);
  64. static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
  65. static int ath10k_pci_init_irq(struct ath10k *ar);
  66. static int ath10k_pci_deinit_irq(struct ath10k *ar);
  67. static int ath10k_pci_request_irq(struct ath10k *ar);
  68. static void ath10k_pci_free_irq(struct ath10k *ar);
  69. static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
  70. struct ath10k_ce_pipe *rx_pipe,
  71. struct bmi_xfer *xfer);
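/*
 * Host-side Copy Engine attributes (see struct ce_attr in ce.h).
 * Roughly, for each CE:
 *   src_nentries  - number of host->target (send) ring entries
 *   dest_nentries - number of target->host (recv) ring entries
 *   src_sz_max    - largest buffer the pipe is expected to carry
 * A zero count means that direction is unused for the pipe.
 */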
  72. static const struct ce_attr host_ce_config_wlan[] = {
  73. /* CE0: host->target HTC control and raw streams */
  74. {
  75. .flags = CE_ATTR_FLAGS,
  76. .src_nentries = 16,
  77. .src_sz_max = 256,
  78. .dest_nentries = 0,
  79. },
  80. /* CE1: target->host HTT + HTC control */
  81. {
  82. .flags = CE_ATTR_FLAGS,
  83. .src_nentries = 0,
  84. .src_sz_max = 512,
  85. .dest_nentries = 512,
  86. },
  87. /* CE2: target->host WMI */
  88. {
  89. .flags = CE_ATTR_FLAGS,
  90. .src_nentries = 0,
  91. .src_sz_max = 2048,
  92. .dest_nentries = 32,
  93. },
  94. /* CE3: host->target WMI */
  95. {
  96. .flags = CE_ATTR_FLAGS,
  97. .src_nentries = 32,
  98. .src_sz_max = 2048,
  99. .dest_nentries = 0,
  100. },
  101. /* CE4: host->target HTT */
  102. {
  103. .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
  104. .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
  105. .src_sz_max = 256,
  106. .dest_nentries = 0,
  107. },
  108. /* CE5: unused */
  109. {
  110. .flags = CE_ATTR_FLAGS,
  111. .src_nentries = 0,
  112. .src_sz_max = 0,
  113. .dest_nentries = 0,
  114. },
  115. /* CE6: target autonomous hif_memcpy */
  116. {
  117. .flags = CE_ATTR_FLAGS,
  118. .src_nentries = 0,
  119. .src_sz_max = 0,
  120. .dest_nentries = 0,
  121. },
  122. /* CE7: ce_diag, the Diagnostic Window */
  123. {
  124. .flags = CE_ATTR_FLAGS,
  125. .src_nentries = 2,
  126. .src_sz_max = DIAG_TRANSFER_LIMIT,
  127. .dest_nentries = 2,
  128. },
  129. };
  130. /* Target firmware's Copy Engine configuration. */
  131. static const struct ce_pipe_config target_ce_config_wlan[] = {
  132. /* CE0: host->target HTC control and raw streams */
  133. {
  134. .pipenum = 0,
  135. .pipedir = PIPEDIR_OUT,
  136. .nentries = 32,
  137. .nbytes_max = 256,
  138. .flags = CE_ATTR_FLAGS,
  139. .reserved = 0,
  140. },
  141. /* CE1: target->host HTT + HTC control */
  142. {
  143. .pipenum = 1,
  144. .pipedir = PIPEDIR_IN,
  145. .nentries = 32,
  146. .nbytes_max = 512,
  147. .flags = CE_ATTR_FLAGS,
  148. .reserved = 0,
  149. },
  150. /* CE2: target->host WMI */
  151. {
  152. .pipenum = 2,
  153. .pipedir = PIPEDIR_IN,
  154. .nentries = 32,
  155. .nbytes_max = 2048,
  156. .flags = CE_ATTR_FLAGS,
  157. .reserved = 0,
  158. },
  159. /* CE3: host->target WMI */
  160. {
  161. .pipenum = 3,
  162. .pipedir = PIPEDIR_OUT,
  163. .nentries = 32,
  164. .nbytes_max = 2048,
  165. .flags = CE_ATTR_FLAGS,
  166. .reserved = 0,
  167. },
  168. /* CE4: host->target HTT */
  169. {
  170. .pipenum = 4,
  171. .pipedir = PIPEDIR_OUT,
  172. .nentries = 256,
  173. .nbytes_max = 256,
  174. .flags = CE_ATTR_FLAGS,
  175. .reserved = 0,
  176. },
  177. /* NB: 50% of src nentries, since tx has 2 frags */
  178. /* CE5: unused */
  179. {
  180. .pipenum = 5,
  181. .pipedir = PIPEDIR_OUT,
  182. .nentries = 32,
  183. .nbytes_max = 2048,
  184. .flags = CE_ATTR_FLAGS,
  185. .reserved = 0,
  186. },
  187. /* CE6: Reserved for target autonomous hif_memcpy */
  188. {
  189. .pipenum = 6,
  190. .pipedir = PIPEDIR_INOUT,
  191. .nentries = 32,
  192. .nbytes_max = 4096,
  193. .flags = CE_ATTR_FLAGS,
  194. .reserved = 0,
  195. },
  196. /* CE7 used only by Host */
  197. };
  198. static bool ath10k_pci_irq_pending(struct ath10k *ar)
  199. {
  200. u32 cause;
  201. /* Check if the shared legacy irq is for us */
  202. cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  203. PCIE_INTR_CAUSE_ADDRESS);
  204. if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
  205. return true;
  206. return false;
  207. }
  208. static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
  209. {
  210. /* IMPORTANT: INTR_CLR register has to be set after
  211. * INTR_ENABLE is set to 0, otherwise interrupt can not be
  212. * really cleared. */
  213. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
  214. 0);
  215. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
  216. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  217. /* IMPORTANT: this extra read transaction is required to
  218. * flush the posted write buffer. */
  219. (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  220. PCIE_INTR_ENABLE_ADDRESS);
  221. }
  222. static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
  223. {
  224. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
  225. PCIE_INTR_ENABLE_ADDRESS,
  226. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  227. /* IMPORTANT: this extra read transaction is required to
  228. * flush the posted write buffer. */
  229. (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  230. PCIE_INTR_ENABLE_ADDRESS);
  231. }
  232. static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
  233. {
  234. struct ath10k *ar = arg;
  235. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  236. if (ar_pci->num_msi_intrs == 0) {
  237. if (!ath10k_pci_irq_pending(ar))
  238. return IRQ_NONE;
  239. ath10k_pci_disable_and_clear_legacy_irq(ar);
  240. }
  241. tasklet_schedule(&ar_pci->early_irq_tasklet);
  242. return IRQ_HANDLED;
  243. }
  244. static int ath10k_pci_request_early_irq(struct ath10k *ar)
  245. {
  246. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  247. int ret;
  248. /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the first
  249. * interrupt from the irq vector is triggered in all cases for FW
  250. * indication/errors */
  251. ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
  252. IRQF_SHARED, "ath10k_pci (early)", ar);
  253. if (ret) {
  254. ath10k_warn("failed to request early irq: %d\n", ret);
  255. return ret;
  256. }
  257. return 0;
  258. }
  259. static void ath10k_pci_free_early_irq(struct ath10k *ar)
  260. {
  261. free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
  262. }
  263. /*
  264. * Diagnostic read/write access is provided for startup/config/debug usage.
  265. * Caller must guarantee proper alignment, when applicable, and that there
  266. * is only a single user at any moment.
  267. */
  268. static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
  269. int nbytes)
  270. {
  271. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  272. int ret = 0;
  273. u32 buf;
  274. unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
  275. unsigned int id;
  276. unsigned int flags;
  277. struct ath10k_ce_pipe *ce_diag;
  278. /* Host buffer address in CE space */
  279. u32 ce_data;
  280. dma_addr_t ce_data_base = 0;
  281. void *data_buf = NULL;
  282. int i;
  283. /*
  284. * This code cannot handle reads to non-memory space. Redirect to the
  285. * register read fn but preserve the multi word read capability of
  286. * this fn
  287. */
  288. if (address < DRAM_BASE_ADDRESS) {
  289. if (!IS_ALIGNED(address, 4) ||
  290. !IS_ALIGNED((unsigned long)data, 4))
  291. return -EIO;
  292. while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
  293. ar, address, (u32 *)data)) == 0)) {
  294. nbytes -= sizeof(u32);
  295. address += sizeof(u32);
  296. data += sizeof(u32);
  297. }
  298. return ret;
  299. }
  300. ce_diag = ar_pci->ce_diag;
  301. /*
  302. * Allocate a temporary bounce buffer to hold caller's data
  303. * to be DMA'ed from Target. This guarantees
  304. * 1) 4-byte alignment
  305. * 2) Buffer in DMA-able space
  306. */
  307. orig_nbytes = nbytes;
  308. data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
  309. orig_nbytes,
  310. &ce_data_base,
  311. GFP_ATOMIC);
  312. if (!data_buf) {
  313. ret = -ENOMEM;
  314. goto done;
  315. }
  316. memset(data_buf, 0, orig_nbytes);
  317. remaining_bytes = orig_nbytes;
  318. ce_data = ce_data_base;
  319. while (remaining_bytes) {
  320. nbytes = min_t(unsigned int, remaining_bytes,
  321. DIAG_TRANSFER_LIMIT);
  322. ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
  323. if (ret != 0)
  324. goto done;
  325. /* Request CE to send from Target(!) address to Host buffer */
  326. /*
  327. * The address supplied by the caller is in the
  328. * Target CPU virtual address space.
  329. *
  330. * In order to use this address with the diagnostic CE,
  331. * convert it from Target CPU virtual address space
  332. * to CE address space
  333. */
  334. ath10k_pci_wake(ar);
  335. address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
  336. address);
  337. ath10k_pci_sleep(ar);
  338. ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
  339. 0);
  340. if (ret)
  341. goto done;
  342. i = 0;
  343. while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
  344. &completed_nbytes,
  345. &id) != 0) {
  346. mdelay(1);
  347. if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  348. ret = -EBUSY;
  349. goto done;
  350. }
  351. }
  352. if (nbytes != completed_nbytes) {
  353. ret = -EIO;
  354. goto done;
  355. }
  356. if (buf != (u32) address) {
  357. ret = -EIO;
  358. goto done;
  359. }
  360. i = 0;
  361. while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
  362. &completed_nbytes,
  363. &id, &flags) != 0) {
  364. mdelay(1);
  365. if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  366. ret = -EBUSY;
  367. goto done;
  368. }
  369. }
  370. if (nbytes != completed_nbytes) {
  371. ret = -EIO;
  372. goto done;
  373. }
  374. if (buf != ce_data) {
  375. ret = -EIO;
  376. goto done;
  377. }
  378. remaining_bytes -= nbytes;
  379. address += nbytes;
  380. ce_data += nbytes;
  381. }
  382. done:
  383. if (ret == 0) {
  384. /* Copy data from allocated DMA buf to caller's buf */
  385. WARN_ON_ONCE(orig_nbytes & 3);
  386. for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
  387. ((u32 *)data)[i] =
  388. __le32_to_cpu(((__le32 *)data_buf)[i]);
  389. }
  390. } else
  391. ath10k_warn("failed to read diag value at 0x%x: %d\n",
  392. address, ret);
  393. if (data_buf)
  394. dma_free_coherent(ar->dev, orig_nbytes, data_buf,
  395. ce_data_base);
  396. return ret;
  397. }
  398. /* Read 4-byte aligned data from Target memory or register */
  399. static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
  400. u32 *data)
  401. {
  402. /* Assume range doesn't cross this boundary */
  403. if (address >= DRAM_BASE_ADDRESS)
  404. return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
  405. ath10k_pci_wake(ar);
  406. *data = ath10k_pci_read32(ar, address);
  407. ath10k_pci_sleep(ar);
  408. return 0;
  409. }
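/*
 * Illustrative use of the diag accessors (cf. ath10k_pci_hif_dump_area
 * below): a single host-interest word can be fetched with
 *
 *   u32 val;
 *   ret = ath10k_pci_diag_read_mem(ar, host_addr, &val, sizeof(u32));
 *
 * while larger regions go through the same bounce-buffer path in
 * DIAG_TRANSFER_LIMIT sized chunks.
 */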
  410. static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
  411. const void *data, int nbytes)
  412. {
  413. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  414. int ret = 0;
  415. u32 buf;
  416. unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
  417. unsigned int id;
  418. unsigned int flags;
  419. struct ath10k_ce_pipe *ce_diag;
  420. void *data_buf = NULL;
  421. u32 ce_data; /* Host buffer address in CE space */
  422. dma_addr_t ce_data_base = 0;
  423. int i;
  424. ce_diag = ar_pci->ce_diag;
  425. /*
  426. * Allocate a temporary bounce buffer to hold caller's data
  427. * to be DMA'ed to Target. This guarantees
  428. * 1) 4-byte alignment
  429. * 2) Buffer in DMA-able space
  430. */
  431. orig_nbytes = nbytes;
  432. data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
  433. orig_nbytes,
  434. &ce_data_base,
  435. GFP_ATOMIC);
  436. if (!data_buf) {
  437. ret = -ENOMEM;
  438. goto done;
  439. }
  440. /* Copy caller's data to allocated DMA buf */
  441. WARN_ON_ONCE(orig_nbytes & 3);
  442. for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
  443. ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
  444. /*
  445. * The address supplied by the caller is in the
  446. * Target CPU virtual address space.
  447. *
  448. * In order to use this address with the diagnostic CE,
  449. * convert it from
  450. * Target CPU virtual address space
  451. * to
  452. * CE address space
  453. */
  454. ath10k_pci_wake(ar);
  455. address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
  456. ath10k_pci_sleep(ar);
  457. remaining_bytes = orig_nbytes;
  458. ce_data = ce_data_base;
  459. while (remaining_bytes) {
  460. /* FIXME: check cast */
  461. nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
  462. /* Set up to receive directly into Target(!) address */
  463. ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
  464. if (ret != 0)
  465. goto done;
  466. /*
  467. * Request CE to send caller-supplied data that
  468. * was copied to bounce buffer to Target(!) address.
  469. */
  470. ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
  471. nbytes, 0, 0);
  472. if (ret != 0)
  473. goto done;
  474. i = 0;
  475. while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
  476. &completed_nbytes,
  477. &id) != 0) {
  478. mdelay(1);
  479. if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  480. ret = -EBUSY;
  481. goto done;
  482. }
  483. }
  484. if (nbytes != completed_nbytes) {
  485. ret = -EIO;
  486. goto done;
  487. }
  488. if (buf != ce_data) {
  489. ret = -EIO;
  490. goto done;
  491. }
  492. i = 0;
  493. while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
  494. &completed_nbytes,
  495. &id, &flags) != 0) {
  496. mdelay(1);
  497. if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  498. ret = -EBUSY;
  499. goto done;
  500. }
  501. }
  502. if (nbytes != completed_nbytes) {
  503. ret = -EIO;
  504. goto done;
  505. }
  506. if (buf != address) {
  507. ret = -EIO;
  508. goto done;
  509. }
  510. remaining_bytes -= nbytes;
  511. address += nbytes;
  512. ce_data += nbytes;
  513. }
  514. done:
  515. if (data_buf) {
  516. dma_free_coherent(ar->dev, orig_nbytes, data_buf,
  517. ce_data_base);
  518. }
  519. if (ret != 0)
  520. ath10k_warn("failed to write diag value at 0x%x: %d\n",
  521. address, ret);
  522. return ret;
  523. }
  524. /* Write 4B data to Target memory or register */
  525. static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
  526. u32 data)
  527. {
  528. /* Assume range doesn't cross this boundary */
  529. if (address >= DRAM_BASE_ADDRESS)
  530. return ath10k_pci_diag_write_mem(ar, address, &data,
  531. sizeof(u32));
  532. ath10k_pci_wake(ar);
  533. ath10k_pci_write32(ar, address, data);
  534. ath10k_pci_sleep(ar);
  535. return 0;
  536. }
  537. static bool ath10k_pci_target_is_awake(struct ath10k *ar)
  538. {
  539. void __iomem *mem = ath10k_pci_priv(ar)->mem;
  540. u32 val;
  541. val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
  542. RTC_STATE_ADDRESS);
  543. return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
  544. }
  545. int ath10k_do_pci_wake(struct ath10k *ar)
  546. {
  547. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  548. void __iomem *pci_addr = ar_pci->mem;
  549. int tot_delay = 0;
  550. int curr_delay = 5;
  551. if (atomic_read(&ar_pci->keep_awake_count) == 0) {
  552. /* Force AWAKE */
  553. iowrite32(PCIE_SOC_WAKE_V_MASK,
  554. pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  555. PCIE_SOC_WAKE_ADDRESS);
  556. }
  557. atomic_inc(&ar_pci->keep_awake_count);
  558. if (ar_pci->verified_awake)
  559. return 0;
  560. for (;;) {
  561. if (ath10k_pci_target_is_awake(ar)) {
  562. ar_pci->verified_awake = true;
  563. return 0;
  564. }
  565. if (tot_delay > PCIE_WAKE_TIMEOUT) {
  566. ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
  567. PCIE_WAKE_TIMEOUT,
  568. atomic_read(&ar_pci->keep_awake_count));
  569. return -ETIMEDOUT;
  570. }
  571. udelay(curr_delay);
  572. tot_delay += curr_delay;
  573. if (curr_delay < 50)
  574. curr_delay += 5;
  575. }
  576. }
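/*
 * The wake path above busy-waits with a gentle backoff: the delay starts
 * at 5 us and grows by 5 us per pass (up to 50 us) until either RTC_STATE
 * reports the target awake or PCIE_WAKE_TIMEOUT is exceeded.
 */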
  577. void ath10k_do_pci_sleep(struct ath10k *ar)
  578. {
  579. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  580. void __iomem *pci_addr = ar_pci->mem;
  581. if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
  582. /* Allow sleep */
  583. ar_pci->verified_awake = false;
  584. iowrite32(PCIE_SOC_WAKE_RESET,
  585. pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  586. PCIE_SOC_WAKE_ADDRESS);
  587. }
  588. }
  589. /* Called by lower (CE) layer when a send to Target completes. */
  590. static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
  591. {
  592. struct ath10k *ar = ce_state->ar;
  593. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  594. struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
  595. void *transfer_context;
  596. u32 ce_data;
  597. unsigned int nbytes;
  598. unsigned int transfer_id;
  599. while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
  600. &ce_data, &nbytes,
  601. &transfer_id) == 0) {
  602. /* no need to call tx completion for NULL pointers */
  603. if (transfer_context == NULL)
  604. continue;
  605. cb->tx_completion(ar, transfer_context, transfer_id);
  606. }
  607. }
  608. /* Called by lower (CE) layer when data is received from the Target. */
  609. static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
  610. {
  611. struct ath10k *ar = ce_state->ar;
  612. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  613. struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
  614. struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
  615. struct sk_buff *skb;
  616. void *transfer_context;
  617. u32 ce_data;
  618. unsigned int nbytes, max_nbytes;
  619. unsigned int transfer_id;
  620. unsigned int flags;
  621. int err;
  622. while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
  623. &ce_data, &nbytes, &transfer_id,
  624. &flags) == 0) {
  625. err = ath10k_pci_post_rx_pipe(pipe_info, 1);
  626. if (unlikely(err)) {
  627. /* FIXME: retry */
  628. ath10k_warn("failed to replenish CE rx ring %d: %d\n",
  629. pipe_info->pipe_num, err);
  630. }
  631. skb = transfer_context;
  632. max_nbytes = skb->len + skb_tailroom(skb);
  633. dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
  634. max_nbytes, DMA_FROM_DEVICE);
  635. if (unlikely(max_nbytes < nbytes)) {
  636. ath10k_warn("rxed more than expected (nbytes %d, max %d)",
  637. nbytes, max_nbytes);
  638. dev_kfree_skb_any(skb);
  639. continue;
  640. }
  641. skb_put(skb, nbytes);
  642. cb->rx_completion(ar, skb, pipe_info->pipe_num);
  643. }
  644. }
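/*
 * hif_tx_sg below pushes a scatter/gather list as a single CE transfer:
 * every item except the last is queued with CE_SEND_FLAG_GATHER so the
 * hardware treats the fragments as one message, and the final item
 * (flags == 0) closes the transfer. On failure the already queued
 * fragments are unwound with __ath10k_ce_send_revert().
 */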
  645. static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
  646. struct ath10k_hif_sg_item *items, int n_items)
  647. {
  648. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  649. struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
  650. struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
  651. struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
  652. unsigned int nentries_mask;
  653. unsigned int sw_index;
  654. unsigned int write_index;
  655. int err, i = 0;
  656. spin_lock_bh(&ar_pci->ce_lock);
  657. nentries_mask = src_ring->nentries_mask;
  658. sw_index = src_ring->sw_index;
  659. write_index = src_ring->write_index;
  660. if (unlikely(CE_RING_DELTA(nentries_mask,
  661. write_index, sw_index - 1) < n_items)) {
  662. err = -ENOBUFS;
  663. goto err;
  664. }
  665. for (i = 0; i < n_items - 1; i++) {
  666. ath10k_dbg(ATH10K_DBG_PCI,
  667. "pci tx item %d paddr 0x%08x len %d n_items %d\n",
  668. i, items[i].paddr, items[i].len, n_items);
  669. ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
  670. items[i].vaddr, items[i].len);
  671. err = ath10k_ce_send_nolock(ce_pipe,
  672. items[i].transfer_context,
  673. items[i].paddr,
  674. items[i].len,
  675. items[i].transfer_id,
  676. CE_SEND_FLAG_GATHER);
  677. if (err)
  678. goto err;
  679. }
  680. /* `i` is equal to `n_items - 1` after the loop */
  681. ath10k_dbg(ATH10K_DBG_PCI,
  682. "pci tx item %d paddr 0x%08x len %d n_items %d\n",
  683. i, items[i].paddr, items[i].len, n_items);
  684. ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
  685. items[i].vaddr, items[i].len);
  686. err = ath10k_ce_send_nolock(ce_pipe,
  687. items[i].transfer_context,
  688. items[i].paddr,
  689. items[i].len,
  690. items[i].transfer_id,
  691. 0);
  692. if (err)
  693. goto err;
  694. spin_unlock_bh(&ar_pci->ce_lock);
  695. return 0;
  696. err:
  697. for (; i > 0; i--)
  698. __ath10k_ce_send_revert(ce_pipe);
  699. spin_unlock_bh(&ar_pci->ce_lock);
  700. return err;
  701. }
  702. static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
  703. {
  704. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  705. ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
  706. return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
  707. }
  708. static void ath10k_pci_hif_dump_area(struct ath10k *ar)
  709. {
  710. u32 reg_dump_area = 0;
  711. u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
  712. u32 host_addr;
  713. int ret;
  714. u32 i;
  715. ath10k_err("firmware crashed!\n");
  716. ath10k_err("hardware name %s version 0x%x\n",
  717. ar->hw_params.name, ar->target_version);
  718. ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
  719. host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
  720. ret = ath10k_pci_diag_read_mem(ar, host_addr,
  721. &reg_dump_area, sizeof(u32));
  722. if (ret) {
  723. ath10k_err("failed to read FW dump area address: %d\n", ret);
  724. return;
  725. }
  726. ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
  727. ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
  728. &reg_dump_values[0],
  729. REG_DUMP_COUNT_QCA988X * sizeof(u32));
  730. if (ret != 0) {
  731. ath10k_err("failed to read FW dump area: %d\n", ret);
  732. return;
  733. }
  734. BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
  735. ath10k_err("target Register Dump\n");
  736. for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
  737. ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
  738. i,
  739. reg_dump_values[i],
  740. reg_dump_values[i + 1],
  741. reg_dump_values[i + 2],
  742. reg_dump_values[i + 3]);
  743. queue_work(ar->workqueue, &ar->restart_work);
  744. }
  745. static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
  746. int force)
  747. {
  748. ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
  749. if (!force) {
  750. int resources;
  751. /*
  752. * Decide whether to actually poll for completions, or just
  753. * wait for a later chance.
  754. * If there seem to be plenty of resources left, then just wait
  755. * since checking involves reading a CE register, which is a
  756. * relatively expensive operation.
  757. */
  758. resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
  759. /*
  760. * If at least 50% of the total resources are still available,
  761. * don't bother checking again yet.
  762. */
  763. if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
  764. return;
  765. }
  766. ath10k_ce_per_engine_service(ar, pipe);
  767. }
  768. static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
  769. struct ath10k_hif_cb *callbacks)
  770. {
  771. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  772. ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
  773. memcpy(&ar_pci->msg_callbacks_current, callbacks,
  774. sizeof(ar_pci->msg_callbacks_current));
  775. }
  776. static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
  777. {
  778. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  779. const struct ce_attr *attr;
  780. struct ath10k_pci_pipe *pipe_info;
  781. int pipe_num, disable_interrupts;
  782. for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  783. pipe_info = &ar_pci->pipe_info[pipe_num];
  784. /* Handle Diagnostic CE specially */
  785. if (pipe_info->ce_hdl == ar_pci->ce_diag)
  786. continue;
  787. attr = &host_ce_config_wlan[pipe_num];
  788. if (attr->src_nentries) {
  789. disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
  790. ath10k_ce_send_cb_register(pipe_info->ce_hdl,
  791. ath10k_pci_ce_send_done,
  792. disable_interrupts);
  793. }
  794. if (attr->dest_nentries)
  795. ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
  796. ath10k_pci_ce_recv_data);
  797. }
  798. return 0;
  799. }
  800. static void ath10k_pci_kill_tasklet(struct ath10k *ar)
  801. {
  802. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  803. int i;
  804. tasklet_kill(&ar_pci->intr_tq);
  805. tasklet_kill(&ar_pci->msi_fw_err);
  806. tasklet_kill(&ar_pci->early_irq_tasklet);
  807. for (i = 0; i < CE_COUNT; i++)
  808. tasklet_kill(&ar_pci->pipe_info[i].intr);
  809. }
  810. /* TODO - temporary mapping while we have too few CE's */
  811. static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
  812. u16 service_id, u8 *ul_pipe,
  813. u8 *dl_pipe, int *ul_is_polled,
  814. int *dl_is_polled)
  815. {
  816. int ret = 0;
  817. ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
  818. /* polling for received messages not supported */
  819. *dl_is_polled = 0;
  820. switch (service_id) {
  821. case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
  822. /*
  823. * Host->target HTT gets its own pipe, so it can be polled
  824. * while other pipes are interrupt driven.
  825. */
  826. *ul_pipe = 4;
  827. /*
  828. * Use the same target->host pipe for HTC ctrl, HTC raw
  829. * streams, and HTT.
  830. */
  831. *dl_pipe = 1;
  832. break;
  833. case ATH10K_HTC_SVC_ID_RSVD_CTRL:
  834. case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
  835. /*
  836. * Note: HTC_RAW_STREAMS_SVC is currently unused, and
  837. * HTC_CTRL_RSVD_SVC could share the same pipe as the
  838. * WMI services. So, if another CE is needed, change
  839. * this to *ul_pipe = 3, which frees up CE 0.
  840. */
  841. /* *ul_pipe = 3; */
  842. *ul_pipe = 0;
  843. *dl_pipe = 1;
  844. break;
  845. case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
  846. case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
  847. case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
  848. case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
  849. case ATH10K_HTC_SVC_ID_WMI_CONTROL:
  850. *ul_pipe = 3;
  851. *dl_pipe = 2;
  852. break;
  853. /* pipe 5 unused */
  854. /* pipe 6 reserved */
  855. /* pipe 7 reserved */
  856. default:
  857. ret = -1;
  858. break;
  859. }
  860. *ul_is_polled =
  861. (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
  862. return ret;
  863. }
  864. static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
  865. u8 *ul_pipe, u8 *dl_pipe)
  866. {
  867. int ul_is_polled, dl_is_polled;
  868. ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
  869. (void)ath10k_pci_hif_map_service_to_pipe(ar,
  870. ATH10K_HTC_SVC_ID_RSVD_CTRL,
  871. ul_pipe,
  872. dl_pipe,
  873. &ul_is_polled,
  874. &dl_is_polled);
  875. }
  876. static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
  877. int num)
  878. {
  879. struct ath10k *ar = pipe_info->hif_ce_state;
  880. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  881. struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
  882. struct sk_buff *skb;
  883. dma_addr_t ce_data;
  884. int i, ret = 0;
  885. if (pipe_info->buf_sz == 0)
  886. return 0;
  887. for (i = 0; i < num; i++) {
  888. skb = dev_alloc_skb(pipe_info->buf_sz);
  889. if (!skb) {
  890. ath10k_warn("failed to allocate skbuff for pipe %d\n",
  891. num);
  892. ret = -ENOMEM;
  893. goto err;
  894. }
  895. WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  896. ce_data = dma_map_single(ar->dev, skb->data,
  897. skb->len + skb_tailroom(skb),
  898. DMA_FROM_DEVICE);
  899. if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
  900. ath10k_warn("failed to DMA map sk_buff\n");
  901. dev_kfree_skb_any(skb);
  902. ret = -EIO;
  903. goto err;
  904. }
  905. ATH10K_SKB_CB(skb)->paddr = ce_data;
  906. pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
  907. pipe_info->buf_sz,
  908. PCI_DMA_FROMDEVICE);
  909. ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
  910. ce_data);
  911. if (ret) {
  912. ath10k_warn("failed to enqueue to pipe %d: %d\n",
  913. num, ret);
  914. goto err;
  915. }
  916. }
  917. return ret;
  918. err:
  919. ath10k_pci_rx_pipe_cleanup(pipe_info);
  920. return ret;
  921. }
  922. static int ath10k_pci_post_rx(struct ath10k *ar)
  923. {
  924. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  925. struct ath10k_pci_pipe *pipe_info;
  926. const struct ce_attr *attr;
  927. int pipe_num, ret = 0;
  928. for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  929. pipe_info = &ar_pci->pipe_info[pipe_num];
  930. attr = &host_ce_config_wlan[pipe_num];
  931. if (attr->dest_nentries == 0)
  932. continue;
  933. ret = ath10k_pci_post_rx_pipe(pipe_info,
  934. attr->dest_nentries - 1);
  935. if (ret) {
  936. ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
  937. pipe_num, ret);
  938. for (; pipe_num >= 0; pipe_num--) {
  939. pipe_info = &ar_pci->pipe_info[pipe_num];
  940. ath10k_pci_rx_pipe_cleanup(pipe_info);
  941. }
  942. return ret;
  943. }
  944. }
  945. return 0;
  946. }
  947. static int ath10k_pci_hif_start(struct ath10k *ar)
  948. {
  949. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  950. int ret, ret_early;
  951. ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
  952. ath10k_pci_free_early_irq(ar);
  953. ath10k_pci_kill_tasklet(ar);
  954. ret = ath10k_pci_request_irq(ar);
  955. if (ret) {
  956. ath10k_warn("failed to post RX buffers for all pipes: %d\n",
  957. ret);
  958. goto err_early_irq;
  959. }
  960. ret = ath10k_pci_setup_ce_irq(ar);
  961. if (ret) {
  962. ath10k_warn("failed to setup CE interrupts: %d\n", ret);
  963. goto err_stop;
  964. }
  965. /* Post buffers once to start things off. */
  966. ret = ath10k_pci_post_rx(ar);
  967. if (ret) {
  968. ath10k_warn("failed to post RX buffers for all pipes: %d\n",
  969. ret);
  970. goto err_stop;
  971. }
  972. ar_pci->started = 1;
  973. return 0;
  974. err_stop:
  975. ath10k_ce_disable_interrupts(ar);
  976. ath10k_pci_free_irq(ar);
  977. ath10k_pci_kill_tasklet(ar);
  978. err_early_irq:
  979. /* Though there should be no interrupts (device was reset)
  980. * power_down() expects the early IRQ to be installed as per the
  981. * driver lifecycle. */
  982. ret_early = ath10k_pci_request_early_irq(ar);
  983. if (ret_early)
  984. ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
  985. return ret;
  986. }
  987. static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
  988. {
  989. struct ath10k *ar;
  990. struct ath10k_pci *ar_pci;
  991. struct ath10k_ce_pipe *ce_hdl;
  992. u32 buf_sz;
  993. struct sk_buff *netbuf;
  994. u32 ce_data;
  995. buf_sz = pipe_info->buf_sz;
  996. /* Unused Copy Engine */
  997. if (buf_sz == 0)
  998. return;
  999. ar = pipe_info->hif_ce_state;
  1000. ar_pci = ath10k_pci_priv(ar);
  1001. if (!ar_pci->started)
  1002. return;
  1003. ce_hdl = pipe_info->ce_hdl;
  1004. while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
  1005. &ce_data) == 0) {
  1006. dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
  1007. netbuf->len + skb_tailroom(netbuf),
  1008. DMA_FROM_DEVICE);
  1009. dev_kfree_skb_any(netbuf);
  1010. }
  1011. }
  1012. static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
  1013. {
  1014. struct ath10k *ar;
  1015. struct ath10k_pci *ar_pci;
  1016. struct ath10k_ce_pipe *ce_hdl;
  1017. struct sk_buff *netbuf;
  1018. u32 ce_data;
  1019. unsigned int nbytes;
  1020. unsigned int id;
  1021. u32 buf_sz;
  1022. buf_sz = pipe_info->buf_sz;
  1023. /* Unused Copy Engine */
  1024. if (buf_sz == 0)
  1025. return;
  1026. ar = pipe_info->hif_ce_state;
  1027. ar_pci = ath10k_pci_priv(ar);
  1028. if (!ar_pci->started)
  1029. return;
  1030. ce_hdl = pipe_info->ce_hdl;
  1031. while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
  1032. &ce_data, &nbytes, &id) == 0) {
  1033. /* no need to call tx completion for NULL pointers */
  1034. if (!netbuf)
  1035. continue;
  1036. ar_pci->msg_callbacks_current.tx_completion(ar,
  1037. netbuf,
  1038. id);
  1039. }
  1040. }
  1041. /*
  1042. * Cleanup residual buffers for device shutdown:
  1043. * buffers that were enqueued for receive
  1044. * buffers that were to be sent
  1045. * Note: Buffers that had completed but which were
  1046. * not yet processed are on a completion queue. They
  1047. * are handled when the completion thread shuts down.
  1048. */
  1049. static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
  1050. {
  1051. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1052. int pipe_num;
  1053. for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  1054. struct ath10k_pci_pipe *pipe_info;
  1055. pipe_info = &ar_pci->pipe_info[pipe_num];
  1056. ath10k_pci_rx_pipe_cleanup(pipe_info);
  1057. ath10k_pci_tx_pipe_cleanup(pipe_info);
  1058. }
  1059. }
  1060. static void ath10k_pci_ce_deinit(struct ath10k *ar)
  1061. {
  1062. int i;
  1063. for (i = 0; i < CE_COUNT; i++)
  1064. ath10k_ce_deinit_pipe(ar, i);
  1065. }
  1066. static void ath10k_pci_hif_stop(struct ath10k *ar)
  1067. {
  1068. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1069. int ret;
  1070. ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
  1071. if (WARN_ON(!ar_pci->started))
  1072. return;
  1073. ret = ath10k_ce_disable_interrupts(ar);
  1074. if (ret)
  1075. ath10k_warn("failed to disable CE interrupts: %d\n", ret);
  1076. ath10k_pci_free_irq(ar);
  1077. ath10k_pci_kill_tasklet(ar);
  1078. ret = ath10k_pci_request_early_irq(ar);
  1079. if (ret)
  1080. ath10k_warn("failed to re-enable early irq: %d\n", ret);
  1081. /* At this point, asynchronous threads are stopped, the target should
  1082. * not DMA nor interrupt. We process the leftovers and then free
  1083. * everything else up. */
  1084. ath10k_pci_buffer_cleanup(ar);
  1085. /* Make sure the device won't access any structures on the host by
  1086. * resetting it. The device was fed with PCI CE ringbuffer
  1087. * configuration during init. If ringbuffers are freed and the device
  1088. * were to access them this could lead to memory corruption on the
  1089. * host. */
  1090. ath10k_pci_warm_reset(ar);
  1091. ar_pci->started = 0;
  1092. }
  1093. static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
  1094. void *req, u32 req_len,
  1095. void *resp, u32 *resp_len)
  1096. {
  1097. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1098. struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
  1099. struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
  1100. struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
  1101. struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
  1102. dma_addr_t req_paddr = 0;
  1103. dma_addr_t resp_paddr = 0;
  1104. struct bmi_xfer xfer = {};
  1105. void *treq, *tresp = NULL;
  1106. int ret = 0;
  1107. might_sleep();
  1108. if (resp && !resp_len)
  1109. return -EINVAL;
  1110. if (resp && resp_len && *resp_len == 0)
  1111. return -EINVAL;
  1112. treq = kmemdup(req, req_len, GFP_KERNEL);
  1113. if (!treq)
  1114. return -ENOMEM;
  1115. req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
  1116. ret = dma_mapping_error(ar->dev, req_paddr);
  1117. if (ret)
  1118. goto err_dma;
  1119. if (resp && resp_len) {
  1120. tresp = kzalloc(*resp_len, GFP_KERNEL);
  1121. if (!tresp) {
  1122. ret = -ENOMEM;
  1123. goto err_req;
  1124. }
  1125. resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
  1126. DMA_FROM_DEVICE);
  1127. ret = dma_mapping_error(ar->dev, resp_paddr);
  1128. if (ret)
  1129. goto err_req;
  1130. xfer.wait_for_resp = true;
  1131. xfer.resp_len = 0;
  1132. ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
  1133. }
  1134. init_completion(&xfer.done);
  1135. ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
  1136. if (ret)
  1137. goto err_resp;
  1138. ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
  1139. if (ret) {
  1140. u32 unused_buffer;
  1141. unsigned int unused_nbytes;
  1142. unsigned int unused_id;
  1143. ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
  1144. &unused_nbytes, &unused_id);
  1145. } else {
  1146. /* non-zero means we did not time out */
  1147. ret = 0;
  1148. }
  1149. err_resp:
  1150. if (resp) {
  1151. u32 unused_buffer;
  1152. ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
  1153. dma_unmap_single(ar->dev, resp_paddr,
  1154. *resp_len, DMA_FROM_DEVICE);
  1155. }
  1156. err_req:
  1157. dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
  1158. if (ret == 0 && resp_len) {
  1159. *resp_len = min(*resp_len, xfer.resp_len);
  1160. memcpy(resp, tresp, *resp_len);
  1161. }
  1162. err_dma:
  1163. kfree(treq);
  1164. kfree(tresp);
  1165. return ret;
  1166. }
  1167. static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
  1168. {
  1169. struct bmi_xfer *xfer;
  1170. u32 ce_data;
  1171. unsigned int nbytes;
  1172. unsigned int transfer_id;
  1173. if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
  1174. &nbytes, &transfer_id))
  1175. return;
  1176. if (xfer->wait_for_resp)
  1177. return;
  1178. complete(&xfer->done);
  1179. }
  1180. static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
  1181. {
  1182. struct bmi_xfer *xfer;
  1183. u32 ce_data;
  1184. unsigned int nbytes;
  1185. unsigned int transfer_id;
  1186. unsigned int flags;
  1187. if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
  1188. &nbytes, &transfer_id, &flags))
  1189. return;
  1190. if (!xfer->wait_for_resp) {
  1191. ath10k_warn("unexpected: BMI data received; ignoring\n");
  1192. return;
  1193. }
  1194. xfer->resp_len = nbytes;
  1195. complete(&xfer->done);
  1196. }
  1197. static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
  1198. struct ath10k_ce_pipe *rx_pipe,
  1199. struct bmi_xfer *xfer)
  1200. {
  1201. unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
  1202. while (time_before_eq(jiffies, timeout)) {
  1203. ath10k_pci_bmi_send_done(tx_pipe);
  1204. ath10k_pci_bmi_recv_data(rx_pipe);
  1205. if (completion_done(&xfer->done))
  1206. return 0;
  1207. schedule();
  1208. }
  1209. return -ETIMEDOUT;
  1210. }
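/*
 * BMI exchanges are completed by polling rather than by interrupts: the
 * wait loop above drives the send and receive completion handlers
 * directly, yielding with schedule() between passes, until the transfer
 * finishes or BMI_COMMUNICATION_TIMEOUT_HZ elapses.
 */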
  1211. /*
  1212. * Map from service/endpoint to Copy Engine.
  1213. * This table is derived from the CE_PCI TABLE, above.
  1214. * It is passed to the Target at startup for use by firmware.
  1215. */
  1216. static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
  1217. {
  1218. ATH10K_HTC_SVC_ID_WMI_DATA_VO,
  1219. PIPEDIR_OUT, /* out = UL = host -> target */
  1220. 3,
  1221. },
  1222. {
  1223. ATH10K_HTC_SVC_ID_WMI_DATA_VO,
  1224. PIPEDIR_IN, /* in = DL = target -> host */
  1225. 2,
  1226. },
  1227. {
  1228. ATH10K_HTC_SVC_ID_WMI_DATA_BK,
  1229. PIPEDIR_OUT, /* out = UL = host -> target */
  1230. 3,
  1231. },
  1232. {
  1233. ATH10K_HTC_SVC_ID_WMI_DATA_BK,
  1234. PIPEDIR_IN, /* in = DL = target -> host */
  1235. 2,
  1236. },
  1237. {
  1238. ATH10K_HTC_SVC_ID_WMI_DATA_BE,
  1239. PIPEDIR_OUT, /* out = UL = host -> target */
  1240. 3,
  1241. },
  1242. {
  1243. ATH10K_HTC_SVC_ID_WMI_DATA_BE,
  1244. PIPEDIR_IN, /* in = DL = target -> host */
  1245. 2,
  1246. },
  1247. {
  1248. ATH10K_HTC_SVC_ID_WMI_DATA_VI,
  1249. PIPEDIR_OUT, /* out = UL = host -> target */
  1250. 3,
  1251. },
  1252. {
  1253. ATH10K_HTC_SVC_ID_WMI_DATA_VI,
  1254. PIPEDIR_IN, /* in = DL = target -> host */
  1255. 2,
  1256. },
  1257. {
  1258. ATH10K_HTC_SVC_ID_WMI_CONTROL,
  1259. PIPEDIR_OUT, /* out = UL = host -> target */
  1260. 3,
  1261. },
  1262. {
  1263. ATH10K_HTC_SVC_ID_WMI_CONTROL,
  1264. PIPEDIR_IN, /* in = DL = target -> host */
  1265. 2,
  1266. },
  1267. {
  1268. ATH10K_HTC_SVC_ID_RSVD_CTRL,
  1269. PIPEDIR_OUT, /* out = UL = host -> target */
  1270. 0, /* could be moved to 3 (share with WMI) */
  1271. },
  1272. {
  1273. ATH10K_HTC_SVC_ID_RSVD_CTRL,
  1274. PIPEDIR_IN, /* in = DL = target -> host */
  1275. 1,
  1276. },
  1277. {
  1278. ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
  1279. PIPEDIR_OUT, /* out = UL = host -> target */
  1280. 0,
  1281. },
  1282. {
  1283. ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
  1284. PIPEDIR_IN, /* in = DL = target -> host */
  1285. 1,
  1286. },
  1287. {
  1288. ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
  1289. PIPEDIR_OUT, /* out = UL = host -> target */
  1290. 4,
  1291. },
  1292. {
  1293. ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
  1294. PIPEDIR_IN, /* in = DL = target -> host */
  1295. 1,
  1296. },
  1297. /* (Additions here) */
  1298. { /* Must be last */
  1299. 0,
  1300. 0,
  1301. 0,
  1302. },
  1303. };
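/*
 * Both target_ce_config_wlan and the service map above are pushed to the
 * firmware through the diagnostic window in ath10k_pci_init_config(), so
 * host and target agree on the pipe layout before the higher layers start.
 */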
  1304. /*
  1305. * Send an interrupt to the device to wake up the Target CPU
  1306. * so it has an opportunity to notice any changed state.
  1307. */
  1308. static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
  1309. {
  1310. int ret;
  1311. u32 core_ctrl;
  1312. ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
  1313. CORE_CTRL_ADDRESS,
  1314. &core_ctrl);
  1315. if (ret) {
  1316. ath10k_warn("failed to read core_ctrl: %d\n", ret);
  1317. return ret;
  1318. }
  1319. /* A_INUM_FIRMWARE interrupt to Target CPU */
  1320. core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
  1321. ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
  1322. CORE_CTRL_ADDRESS,
  1323. core_ctrl);
  1324. if (ret) {
  1325. ath10k_warn("failed to set target CPU interrupt mask: %d\n",
  1326. ret);
  1327. return ret;
  1328. }
  1329. return 0;
  1330. }
  1331. static int ath10k_pci_init_config(struct ath10k *ar)
  1332. {
  1333. u32 interconnect_targ_addr;
  1334. u32 pcie_state_targ_addr = 0;
  1335. u32 pipe_cfg_targ_addr = 0;
  1336. u32 svc_to_pipe_map = 0;
  1337. u32 pcie_config_flags = 0;
  1338. u32 ealloc_value;
  1339. u32 ealloc_targ_addr;
  1340. u32 flag2_value;
  1341. u32 flag2_targ_addr;
  1342. int ret = 0;
  1343. /* Download to Target the CE Config and the service-to-CE map */
  1344. interconnect_targ_addr =
  1345. host_interest_item_address(HI_ITEM(hi_interconnect_state));
  1346. /* Supply Target-side CE configuration */
  1347. ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
  1348. &pcie_state_targ_addr);
  1349. if (ret != 0) {
  1350. ath10k_err("Failed to get pcie state addr: %d\n", ret);
  1351. return ret;
  1352. }
  1353. if (pcie_state_targ_addr == 0) {
  1354. ret = -EIO;
  1355. ath10k_err("Invalid pcie state addr\n");
  1356. return ret;
  1357. }
  1358. ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  1359. offsetof(struct pcie_state,
  1360. pipe_cfg_addr),
  1361. &pipe_cfg_targ_addr);
  1362. if (ret != 0) {
  1363. ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
  1364. return ret;
  1365. }
  1366. if (pipe_cfg_targ_addr == 0) {
  1367. ret = -EIO;
  1368. ath10k_err("Invalid pipe cfg addr\n");
  1369. return ret;
  1370. }
  1371. ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
  1372. target_ce_config_wlan,
  1373. sizeof(target_ce_config_wlan));
  1374. if (ret != 0) {
  1375. ath10k_err("Failed to write pipe cfg: %d\n", ret);
  1376. return ret;
  1377. }
  1378. ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  1379. offsetof(struct pcie_state,
  1380. svc_to_pipe_map),
  1381. &svc_to_pipe_map);
  1382. if (ret != 0) {
  1383. ath10k_err("Failed to get svc/pipe map: %d\n", ret);
  1384. return ret;
  1385. }
  1386. if (svc_to_pipe_map == 0) {
  1387. ret = -EIO;
  1388. ath10k_err("Invalid svc_to_pipe map\n");
  1389. return ret;
  1390. }
  1391. ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
  1392. target_service_to_ce_map_wlan,
  1393. sizeof(target_service_to_ce_map_wlan));
  1394. if (ret != 0) {
  1395. ath10k_err("Failed to write svc/pipe map: %d\n", ret);
  1396. return ret;
  1397. }
  1398. ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  1399. offsetof(struct pcie_state,
  1400. config_flags),
  1401. &pcie_config_flags);
  1402. if (ret != 0) {
  1403. ath10k_err("Failed to get pcie config_flags: %d\n", ret);
  1404. return ret;
  1405. }
  1406. pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  1407. ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
  1408. offsetof(struct pcie_state, config_flags),
  1409. &pcie_config_flags,
  1410. sizeof(pcie_config_flags));
  1411. if (ret != 0) {
  1412. ath10k_err("Failed to write pcie config_flags: %d\n", ret);
  1413. return ret;
  1414. }
  1415. /* configure early allocation */
  1416. ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
  1417. ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
  1418. if (ret != 0) {
  1419. ath10k_err("Faile to get early alloc val: %d\n", ret);
  1420. return ret;
  1421. }
  1422. /* first bank is switched to IRAM */
  1423. ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1424. HI_EARLY_ALLOC_MAGIC_MASK);
  1425. ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
  1426. HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1427. ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
  1428. if (ret != 0) {
  1429. ath10k_err("Failed to set early alloc val: %d\n", ret);
  1430. return ret;
  1431. }
  1432. /* Tell Target to proceed with initialization */
  1433. flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
  1434. ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
  1435. if (ret != 0) {
  1436. ath10k_err("Failed to get option val: %d\n", ret);
  1437. return ret;
  1438. }
  1439. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1440. ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
  1441. if (ret != 0) {
  1442. ath10k_err("Failed to set option val: %d\n", ret);
  1443. return ret;
  1444. }
  1445. return 0;
  1446. }
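/* Allocate one copy engine pipe for each entry in host_ce_config_wlan. */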
  1447. static int ath10k_pci_alloc_ce(struct ath10k *ar)
  1448. {
  1449. int i, ret;
  1450. for (i = 0; i < CE_COUNT; i++) {
  1451. ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
  1452. if (ret) {
  1453. ath10k_err("failed to allocate copy engine pipe %d: %d\n",
  1454. i, ret);
  1455. return ret;
  1456. }
  1457. }
  1458. return 0;
  1459. }
  1460. static void ath10k_pci_free_ce(struct ath10k *ar)
  1461. {
  1462. int i;
  1463. for (i = 0; i < CE_COUNT; i++)
  1464. ath10k_ce_free_pipe(ar, i);
  1465. }
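/*
 * (Re)initialize every copy engine pipe from host_ce_config_wlan. The
 * last CE is kept back as ce_diag for diagnostic window accesses and is
 * not given a buffer size for regular traffic.
 */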
  1466. static int ath10k_pci_ce_init(struct ath10k *ar)
  1467. {
  1468. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1469. struct ath10k_pci_pipe *pipe_info;
  1470. const struct ce_attr *attr;
  1471. int pipe_num, ret;
  1472. for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  1473. pipe_info = &ar_pci->pipe_info[pipe_num];
  1474. pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
  1475. pipe_info->pipe_num = pipe_num;
  1476. pipe_info->hif_ce_state = ar;
  1477. attr = &host_ce_config_wlan[pipe_num];
  1478. ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
  1479. if (ret) {
  1480. ath10k_err("failed to initialize copy engine pipe %d: %d\n",
  1481. pipe_num, ret);
  1482. return ret;
  1483. }
  1484. if (pipe_num == CE_COUNT - 1) {
/*
 * Reserve the last copy engine for
 * diagnostic window support.
 */
  1489. ar_pci->ce_diag = pipe_info->ce_hdl;
  1490. continue;
  1491. }
  1492. pipe_info->buf_sz = (size_t) (attr->src_sz_max);
  1493. }
  1494. return 0;
  1495. }
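/*
 * Handle a firmware event interrupt: acknowledge FW_IND_EVENT_PENDING
 * and, if the HIF has already been started, dump the firmware crash area.
 */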
  1496. static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
  1497. {
  1498. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1499. u32 fw_indicator;
  1500. ath10k_pci_wake(ar);
  1501. fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  1502. if (fw_indicator & FW_IND_EVENT_PENDING) {
  1503. /* ACK: clear Target-side pending event */
  1504. ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
  1505. fw_indicator & ~FW_IND_EVENT_PENDING);
  1506. if (ar_pci->started) {
  1507. ath10k_pci_hif_dump_area(ar);
  1508. } else {
  1509. /*
  1510. * Probable Target failure before we're prepared
  1511. * to handle it. Generally unexpected.
  1512. */
  1513. ath10k_warn("early firmware event indicated\n");
  1514. }
  1515. }
  1516. ath10k_pci_sleep(ar);
  1517. }
  1518. /* this function effectively clears target memory controller assert line */
  1519. static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
  1520. {
  1521. u32 val;
  1522. val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  1523. ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
  1524. val | SOC_RESET_CONTROL_SI0_RST_MASK);
  1525. val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  1526. msleep(10);
  1527. val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  1528. ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
  1529. val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
  1530. val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  1531. msleep(10);
  1532. }
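/*
 * Warm reset sequence: mask and clear pending PCIe interrupts, clear the
 * firmware indicator, stop the LF timer, pulse the CE reset, then assert
 * a CPU warm reset. Unlike ath10k_pci_cold_reset() this does not touch
 * SOC_GLOBAL_RESET, so the PCIe core is left alone.
 */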
  1533. static int ath10k_pci_warm_reset(struct ath10k *ar)
  1534. {
  1535. int ret = 0;
  1536. u32 val;
  1537. ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
  1538. ret = ath10k_do_pci_wake(ar);
  1539. if (ret) {
  1540. ath10k_err("failed to wake up target: %d\n", ret);
  1541. return ret;
  1542. }
  1543. /* debug */
  1544. val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  1545. PCIE_INTR_CAUSE_ADDRESS);
  1546. ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
  1547. val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  1548. CPU_INTR_ADDRESS);
  1549. ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
  1550. val);
  1551. /* disable pending irqs */
  1552. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
  1553. PCIE_INTR_ENABLE_ADDRESS, 0);
  1554. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
  1555. PCIE_INTR_CLR_ADDRESS, ~0);
  1556. msleep(100);
  1557. /* clear fw indicator */
  1558. ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
  1559. /* clear target LF timer interrupts */
  1560. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1561. SOC_LF_TIMER_CONTROL0_ADDRESS);
  1562. ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
  1563. SOC_LF_TIMER_CONTROL0_ADDRESS,
  1564. val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
  1565. /* reset CE */
  1566. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1567. SOC_RESET_CONTROL_ADDRESS);
  1568. ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  1569. val | SOC_RESET_CONTROL_CE_RST_MASK);
  1570. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1571. SOC_RESET_CONTROL_ADDRESS);
  1572. msleep(10);
  1573. /* unreset CE */
  1574. ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  1575. val & ~SOC_RESET_CONTROL_CE_RST_MASK);
  1576. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1577. SOC_RESET_CONTROL_ADDRESS);
  1578. msleep(10);
  1579. ath10k_pci_warm_reset_si0(ar);
  1580. /* debug */
  1581. val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  1582. PCIE_INTR_CAUSE_ADDRESS);
  1583. ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
  1584. val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  1585. CPU_INTR_ADDRESS);
  1586. ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
  1587. val);
  1588. /* CPU warm reset */
  1589. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1590. SOC_RESET_CONTROL_ADDRESS);
  1591. ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  1592. val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
  1593. val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  1594. SOC_RESET_CONTROL_ADDRESS);
  1595. ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
  1596. msleep(100);
  1597. ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
  1598. ath10k_do_pci_sleep(ar);
  1599. return ret;
  1600. }
  1601. static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
  1602. {
  1603. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1604. const char *irq_mode;
  1605. int ret;
  1606. /*
  1607. * Bring the target up cleanly.
  1608. *
  1609. * The target may be in an undefined state with an AUX-powered Target
  1610. * and a Host in WoW mode. If the Host crashes, loses power, or is
  1611. * restarted (without unloading the driver) then the Target is left
  1612. * (aux) powered and running. On a subsequent driver load, the Target
  1613. * is in an unexpected state. We try to catch that here in order to
  1614. * reset the Target and retry the probe.
  1615. */
  1616. if (cold_reset)
  1617. ret = ath10k_pci_cold_reset(ar);
  1618. else
  1619. ret = ath10k_pci_warm_reset(ar);
  1620. if (ret) {
  1621. ath10k_err("failed to reset target: %d\n", ret);
  1622. goto err;
  1623. }
  1624. if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  1625. /* Force AWAKE forever */
  1626. ath10k_do_pci_wake(ar);
  1627. ret = ath10k_pci_ce_init(ar);
  1628. if (ret) {
  1629. ath10k_err("failed to initialize CE: %d\n", ret);
  1630. goto err_ps;
  1631. }
  1632. ret = ath10k_ce_disable_interrupts(ar);
  1633. if (ret) {
  1634. ath10k_err("failed to disable CE interrupts: %d\n", ret);
  1635. goto err_ce;
  1636. }
  1637. ret = ath10k_pci_init_irq(ar);
  1638. if (ret) {
  1639. ath10k_err("failed to init irqs: %d\n", ret);
  1640. goto err_ce;
  1641. }
  1642. ret = ath10k_pci_request_early_irq(ar);
  1643. if (ret) {
  1644. ath10k_err("failed to request early irq: %d\n", ret);
  1645. goto err_deinit_irq;
  1646. }
  1647. ret = ath10k_pci_wait_for_target_init(ar);
  1648. if (ret) {
  1649. ath10k_err("failed to wait for target to init: %d\n", ret);
  1650. goto err_free_early_irq;
  1651. }
  1652. ret = ath10k_pci_init_config(ar);
  1653. if (ret) {
  1654. ath10k_err("failed to setup init config: %d\n", ret);
  1655. goto err_free_early_irq;
  1656. }
  1657. ret = ath10k_pci_wake_target_cpu(ar);
  1658. if (ret) {
  1659. ath10k_err("could not wake up target CPU: %d\n", ret);
  1660. goto err_free_early_irq;
  1661. }
  1662. if (ar_pci->num_msi_intrs > 1)
  1663. irq_mode = "MSI-X";
  1664. else if (ar_pci->num_msi_intrs == 1)
  1665. irq_mode = "MSI";
  1666. else
  1667. irq_mode = "legacy";
  1668. if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
  1669. ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
  1670. irq_mode, ath10k_pci_irq_mode,
  1671. ath10k_pci_reset_mode);
  1672. return 0;
  1673. err_free_early_irq:
  1674. ath10k_pci_free_early_irq(ar);
  1675. err_deinit_irq:
  1676. ath10k_pci_deinit_irq(ar);
  1677. err_ce:
  1678. ath10k_pci_ce_deinit(ar);
  1679. ath10k_pci_warm_reset(ar);
  1680. err_ps:
  1681. if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  1682. ath10k_do_pci_sleep(ar);
  1683. err:
  1684. return ret;
  1685. }
  1686. static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
  1687. {
  1688. int i, ret;
/*
 * Sometimes warm reset only succeeds after retries.
 *
 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
 * on the first try.
 */
  1695. for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
  1696. ret = __ath10k_pci_hif_power_up(ar, false);
  1697. if (ret == 0)
  1698. break;
  1699. ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
  1700. i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
  1701. }
  1702. return ret;
  1703. }
  1704. static int ath10k_pci_hif_power_up(struct ath10k *ar)
  1705. {
  1706. int ret;
  1707. ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
  1708. /*
  1709. * Hardware CUS232 version 2 has some issues with cold reset and the
  1710. * preferred (and safer) way to perform a device reset is through a
  1711. * warm reset.
  1712. *
 * Warm reset doesn't always work, though, so falling back to cold reset
 * may be necessary.
  1715. */
  1716. ret = ath10k_pci_hif_power_up_warm(ar);
  1717. if (ret) {
  1718. ath10k_warn("failed to power up target using warm reset: %d\n",
  1719. ret);
  1720. if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
  1721. return ret;
  1722. ath10k_warn("trying cold reset\n");
  1723. ret = __ath10k_pci_hif_power_up(ar, true);
  1724. if (ret) {
  1725. ath10k_err("failed to power up target using cold reset too (%d)\n",
  1726. ret);
  1727. return ret;
  1728. }
  1729. }
  1730. return 0;
  1731. }
  1732. static void ath10k_pci_hif_power_down(struct ath10k *ar)
  1733. {
  1734. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1735. ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
  1736. ath10k_pci_free_early_irq(ar);
  1737. ath10k_pci_kill_tasklet(ar);
  1738. ath10k_pci_deinit_irq(ar);
  1739. ath10k_pci_ce_deinit(ar);
  1740. ath10k_pci_warm_reset(ar);
  1741. if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  1742. ath10k_do_pci_sleep(ar);
  1743. }
  1744. #ifdef CONFIG_PM
  1745. #define ATH10K_PCI_PM_CONTROL 0x44
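/*
 * Note: the driver assumes the PCI power-management control/status
 * register (PMCSR) lives at config offset 0x44. The low two bits select
 * the power state: 0x0 = D0, 0x3 = D3hot. Suspend moves the device to
 * D3hot after saving config space; resume restores it to D0.
 */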
  1746. static int ath10k_pci_hif_suspend(struct ath10k *ar)
  1747. {
  1748. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1749. struct pci_dev *pdev = ar_pci->pdev;
  1750. u32 val;
  1751. pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
  1752. if ((val & 0x000000ff) != 0x3) {
  1753. pci_save_state(pdev);
  1754. pci_disable_device(pdev);
  1755. pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
  1756. (val & 0xffffff00) | 0x03);
  1757. }
  1758. return 0;
  1759. }
  1760. static int ath10k_pci_hif_resume(struct ath10k *ar)
  1761. {
  1762. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1763. struct pci_dev *pdev = ar_pci->pdev;
  1764. u32 val;
  1765. pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
  1766. if ((val & 0x000000ff) != 0) {
  1767. pci_restore_state(pdev);
  1768. pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
  1769. val & 0xffffff00);
  1770. /*
  1771. * Suspend/Resume resets the PCI configuration space,
  1772. * so we have to re-disable the RETRY_TIMEOUT register (0x41)
  1773. * to keep PCI Tx retries from interfering with C3 CPU state
  1774. */
  1775. pci_read_config_dword(pdev, 0x40, &val);
  1776. if ((val & 0x0000ff00) != 0)
  1777. pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
  1778. }
  1779. return 0;
  1780. }
  1781. #endif
  1782. static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
  1783. .tx_sg = ath10k_pci_hif_tx_sg,
  1784. .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
  1785. .start = ath10k_pci_hif_start,
  1786. .stop = ath10k_pci_hif_stop,
  1787. .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
  1788. .get_default_pipe = ath10k_pci_hif_get_default_pipe,
  1789. .send_complete_check = ath10k_pci_hif_send_complete_check,
  1790. .set_callbacks = ath10k_pci_hif_set_callbacks,
  1791. .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
  1792. .power_up = ath10k_pci_hif_power_up,
  1793. .power_down = ath10k_pci_hif_power_down,
  1794. #ifdef CONFIG_PM
  1795. .suspend = ath10k_pci_hif_suspend,
  1796. .resume = ath10k_pci_hif_resume,
  1797. #endif
  1798. };
  1799. static void ath10k_pci_ce_tasklet(unsigned long ptr)
  1800. {
  1801. struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
  1802. struct ath10k_pci *ar_pci = pipe->ar_pci;
  1803. ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
  1804. }
  1805. static void ath10k_msi_err_tasklet(unsigned long data)
  1806. {
  1807. struct ath10k *ar = (struct ath10k *)data;
  1808. ath10k_pci_fw_interrupt_handler(ar);
  1809. }
  1810. /*
  1811. * Handler for a per-engine interrupt on a PARTICULAR CE.
  1812. * This is used in cases where each CE has a private MSI interrupt.
  1813. */
  1814. static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
  1815. {
  1816. struct ath10k *ar = arg;
  1817. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1818. int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
  1819. if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
  1820. ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
  1821. return IRQ_HANDLED;
  1822. }
  1823. /*
  1824. * NOTE: We are able to derive ce_id from irq because we
  1825. * use a one-to-one mapping for CE's 0..5.
  1826. * CE's 6 & 7 do not use interrupts at all.
  1827. *
  1828. * This mapping must be kept in sync with the mapping
  1829. * used by firmware.
  1830. */
  1831. tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
  1832. return IRQ_HANDLED;
  1833. }
  1834. static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
  1835. {
  1836. struct ath10k *ar = arg;
  1837. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1838. tasklet_schedule(&ar_pci->msi_fw_err);
  1839. return IRQ_HANDLED;
  1840. }
  1841. /*
  1842. * Top-level interrupt handler for all PCI interrupts from a Target.
  1843. * When a block of MSI interrupts is allocated, this top-level handler
  1844. * is not used; instead, we directly call the correct sub-handler.
  1845. */
  1846. static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
  1847. {
  1848. struct ath10k *ar = arg;
  1849. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1850. if (ar_pci->num_msi_intrs == 0) {
  1851. if (!ath10k_pci_irq_pending(ar))
  1852. return IRQ_NONE;
  1853. ath10k_pci_disable_and_clear_legacy_irq(ar);
  1854. }
  1855. tasklet_schedule(&ar_pci->intr_tq);
  1856. return IRQ_HANDLED;
  1857. }
  1858. static void ath10k_pci_early_irq_tasklet(unsigned long data)
  1859. {
  1860. struct ath10k *ar = (struct ath10k *)data;
  1861. u32 fw_ind;
  1862. int ret;
  1863. ret = ath10k_pci_wake(ar);
  1864. if (ret) {
  1865. ath10k_warn("failed to wake target in early irq tasklet: %d\n",
  1866. ret);
  1867. return;
  1868. }
  1869. fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  1870. if (fw_ind & FW_IND_EVENT_PENDING) {
  1871. ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
  1872. fw_ind & ~FW_IND_EVENT_PENDING);
  1873. ath10k_pci_hif_dump_area(ar);
  1874. }
  1875. ath10k_pci_sleep(ar);
  1876. ath10k_pci_enable_legacy_irq(ar);
  1877. }
  1878. static void ath10k_pci_tasklet(unsigned long data)
  1879. {
  1880. struct ath10k *ar = (struct ath10k *)data;
  1881. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1882. ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
  1883. ath10k_ce_per_engine_service_any(ar);
  1884. /* Re-enable legacy irq that was disabled in the irq handler */
  1885. if (ar_pci->num_msi_intrs == 0)
  1886. ath10k_pci_enable_legacy_irq(ar);
  1887. }
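/*
 * MSI-X (block MSI) vector layout: vector MSI_ASSIGN_FW carries firmware
 * events, vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX carry per-CE
 * interrupts.
 */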
  1888. static int ath10k_pci_request_irq_msix(struct ath10k *ar)
  1889. {
  1890. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1891. int ret, i;
  1892. ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
  1893. ath10k_pci_msi_fw_handler,
  1894. IRQF_SHARED, "ath10k_pci", ar);
  1895. if (ret) {
  1896. ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
  1897. ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
  1898. return ret;
  1899. }
  1900. for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
  1901. ret = request_irq(ar_pci->pdev->irq + i,
  1902. ath10k_pci_per_engine_handler,
  1903. IRQF_SHARED, "ath10k_pci", ar);
  1904. if (ret) {
  1905. ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
  1906. ar_pci->pdev->irq + i, ret);
  1907. for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
  1908. free_irq(ar_pci->pdev->irq + i, ar);
  1909. free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
  1910. return ret;
  1911. }
  1912. }
  1913. return 0;
  1914. }
  1915. static int ath10k_pci_request_irq_msi(struct ath10k *ar)
  1916. {
  1917. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1918. int ret;
  1919. ret = request_irq(ar_pci->pdev->irq,
  1920. ath10k_pci_interrupt_handler,
  1921. IRQF_SHARED, "ath10k_pci", ar);
  1922. if (ret) {
  1923. ath10k_warn("failed to request MSI irq %d: %d\n",
  1924. ar_pci->pdev->irq, ret);
  1925. return ret;
  1926. }
  1927. return 0;
  1928. }
  1929. static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
  1930. {
  1931. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1932. int ret;
  1933. ret = request_irq(ar_pci->pdev->irq,
  1934. ath10k_pci_interrupt_handler,
  1935. IRQF_SHARED, "ath10k_pci", ar);
  1936. if (ret) {
  1937. ath10k_warn("failed to request legacy irq %d: %d\n",
  1938. ar_pci->pdev->irq, ret);
  1939. return ret;
  1940. }
  1941. return 0;
  1942. }
  1943. static int ath10k_pci_request_irq(struct ath10k *ar)
  1944. {
  1945. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1946. switch (ar_pci->num_msi_intrs) {
  1947. case 0:
  1948. return ath10k_pci_request_irq_legacy(ar);
  1949. case 1:
  1950. return ath10k_pci_request_irq_msi(ar);
  1951. case MSI_NUM_REQUEST:
  1952. return ath10k_pci_request_irq_msix(ar);
  1953. }
  1954. ath10k_warn("unknown irq configuration upon request\n");
  1955. return -EINVAL;
  1956. }
  1957. static void ath10k_pci_free_irq(struct ath10k *ar)
  1958. {
  1959. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1960. int i;
/* There's at least one interrupt regardless of whether it's legacy INTR,
 * MSI or MSI-X */
  1963. for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
  1964. free_irq(ar_pci->pdev->irq + i, ar);
  1965. }
  1966. static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
  1967. {
  1968. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1969. int i;
  1970. tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
  1971. tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
  1972. (unsigned long)ar);
  1973. tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
  1974. (unsigned long)ar);
  1975. for (i = 0; i < CE_COUNT; i++) {
  1976. ar_pci->pipe_info[i].ar_pci = ar_pci;
  1977. tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
  1978. (unsigned long)&ar_pci->pipe_info[i]);
  1979. }
  1980. }
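/*
 * Interrupt setup: try MSI-X (MSI_NUM_REQUEST vectors) when the device
 * advertises it, fall back to a single MSI, and finally to legacy INTx.
 * For legacy interrupts the firmware and CE interrupt sources are
 * unmasked in the target's PCIE_INTR_ENABLE register.
 */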
  1981. static int ath10k_pci_init_irq(struct ath10k *ar)
  1982. {
  1983. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1984. bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
  1985. ar_pci->features);
  1986. int ret;
  1987. ath10k_pci_init_irq_tasklets(ar);
  1988. if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
  1989. !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
  1990. ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
  1991. /* Try MSI-X */
  1992. if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
  1993. ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
  1994. ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
  1995. ar_pci->num_msi_intrs);
  1996. if (ret > 0)
  1997. return 0;
  1998. /* fall-through */
  1999. }
  2000. /* Try MSI */
  2001. if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
  2002. ar_pci->num_msi_intrs = 1;
  2003. ret = pci_enable_msi(ar_pci->pdev);
  2004. if (ret == 0)
  2005. return 0;
  2006. /* fall-through */
  2007. }
  2008. /* Try legacy irq
  2009. *
  2010. * A potential race occurs here: The CORE_BASE write
  2011. * depends on target correctly decoding AXI address but
  2012. * host won't know when target writes BAR to CORE_CTRL.
  2013. * This write might get lost if target has NOT written BAR.
 * For now, fix the race by repeating the write in the
 * synchronization check below. */
  2016. ar_pci->num_msi_intrs = 0;
  2017. ret = ath10k_pci_wake(ar);
  2018. if (ret) {
  2019. ath10k_warn("failed to wake target: %d\n", ret);
  2020. return ret;
  2021. }
  2022. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
  2023. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  2024. ath10k_pci_sleep(ar);
  2025. return 0;
  2026. }
  2027. static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
  2028. {
  2029. int ret;
  2030. ret = ath10k_pci_wake(ar);
  2031. if (ret) {
  2032. ath10k_warn("failed to wake target: %d\n", ret);
  2033. return ret;
  2034. }
  2035. ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
  2036. 0);
  2037. ath10k_pci_sleep(ar);
  2038. return 0;
  2039. }
  2040. static int ath10k_pci_deinit_irq(struct ath10k *ar)
  2041. {
  2042. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  2043. switch (ar_pci->num_msi_intrs) {
  2044. case 0:
  2045. return ath10k_pci_deinit_irq_legacy(ar);
  2046. case 1:
  2047. /* fall-through */
  2048. case MSI_NUM_REQUEST:
  2049. pci_disable_msi(ar_pci->pdev);
  2050. return 0;
  2051. default:
  2052. pci_disable_msi(ar_pci->pdev);
  2053. }
  2054. ath10k_warn("unknown irq configuration upon deinit\n");
  2055. return -EINVAL;
  2056. }
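/*
 * Poll FW_INDICATOR_ADDRESS until the firmware sets FW_IND_INITIALIZED,
 * reports a crash via FW_IND_EVENT_PENDING, or ATH10K_PCI_TARGET_WAIT
 * expires. A read of 0xffffffff means the device has dropped off the bus.
 */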
  2057. static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
  2058. {
  2059. struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  2060. unsigned long timeout;
  2061. int ret;
  2062. u32 val;
  2063. ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
  2064. ret = ath10k_pci_wake(ar);
  2065. if (ret) {
  2066. ath10k_err("failed to wake up target for init: %d\n", ret);
  2067. return ret;
  2068. }
  2069. timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
  2070. do {
  2071. val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  2072. ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
  2073. /* target should never return this */
  2074. if (val == 0xffffffff)
  2075. continue;
  2076. /* the device has crashed so don't bother trying anymore */
  2077. if (val & FW_IND_EVENT_PENDING)
  2078. break;
  2079. if (val & FW_IND_INITIALIZED)
  2080. break;
  2081. if (ar_pci->num_msi_intrs == 0)
  2082. /* Fix potential race by repeating CORE_BASE writes */
  2083. ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
  2084. PCIE_INTR_FIRMWARE_MASK |
  2085. PCIE_INTR_CE_MASK_ALL);
  2086. mdelay(10);
  2087. } while (time_before(jiffies, timeout));
  2088. if (val == 0xffffffff) {
  2089. ath10k_err("failed to read device register, device is gone\n");
  2090. ret = -EIO;
  2091. goto out;
  2092. }
  2093. if (val & FW_IND_EVENT_PENDING) {
  2094. ath10k_warn("device has crashed during init\n");
  2095. ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
  2096. val & ~FW_IND_EVENT_PENDING);
  2097. ath10k_pci_hif_dump_area(ar);
  2098. ret = -ECOMM;
  2099. goto out;
  2100. }
  2101. if (!(val & FW_IND_INITIALIZED)) {
  2102. ath10k_err("failed to receive initialized event from target: %08x\n",
  2103. val);
  2104. ret = -ETIMEDOUT;
  2105. goto out;
  2106. }
  2107. ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
  2108. out:
  2109. ath10k_pci_sleep(ar);
  2110. return ret;
  2111. }
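/*
 * Cold reset: assert the SoC global reset (which also resets the PCIe
 * core), wait for RTC_STATE to report the cold reset state, then
 * de-assert and wait for it to clear again.
 */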
  2112. static int ath10k_pci_cold_reset(struct ath10k *ar)
  2113. {
  2114. int i, ret;
  2115. u32 val;
  2116. ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
  2117. ret = ath10k_do_pci_wake(ar);
  2118. if (ret) {
  2119. ath10k_err("failed to wake up target: %d\n",
  2120. ret);
  2121. return ret;
  2122. }
  2123. /* Put Target, including PCIe, into RESET. */
  2124. val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
  2125. val |= 1;
  2126. ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
  2127. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  2128. if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
  2129. RTC_STATE_COLD_RESET_MASK)
  2130. break;
  2131. msleep(1);
  2132. }
  2133. /* Pull Target, including PCIe, out of RESET. */
  2134. val &= ~1;
  2135. ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
  2136. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  2137. if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
  2138. RTC_STATE_COLD_RESET_MASK))
  2139. break;
  2140. msleep(1);
  2141. }
  2142. ath10k_do_pci_sleep(ar);
  2143. ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
  2144. return 0;
  2145. }
  2146. static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
  2147. {
  2148. int i;
  2149. for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
  2150. if (!test_bit(i, ar_pci->features))
  2151. continue;
  2152. switch (i) {
  2153. case ATH10K_PCI_FEATURE_MSI_X:
  2154. ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
  2155. break;
  2156. case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
  2157. ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
  2158. break;
  2159. }
  2160. }
  2161. }
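/*
 * Probe: allocate per-device state, create the core instance, enable the
 * PCI device, map the MMIO BAR, force 32-bit DMA, read the chip id,
 * allocate the copy engine pipes and register with the ath10k core.
 */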
  2162. static int ath10k_pci_probe(struct pci_dev *pdev,
  2163. const struct pci_device_id *pci_dev)
  2164. {
  2165. void __iomem *mem;
  2166. int ret = 0;
  2167. struct ath10k *ar;
  2168. struct ath10k_pci *ar_pci;
  2169. u32 lcr_val, chip_id;
  2170. ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
  2171. ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
  2172. if (ar_pci == NULL)
  2173. return -ENOMEM;
  2174. ar_pci->pdev = pdev;
  2175. ar_pci->dev = &pdev->dev;
  2176. switch (pci_dev->device) {
  2177. case QCA988X_2_0_DEVICE_ID:
  2178. set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
  2179. break;
  2180. default:
  2181. ret = -ENODEV;
  2182. ath10k_err("Unknown device ID: %d\n", pci_dev->device);
  2183. goto err_ar_pci;
  2184. }
  2185. if (ath10k_pci_target_ps)
  2186. set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
  2187. ath10k_pci_dump_features(ar_pci);
  2188. ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
  2189. if (!ar) {
  2190. ath10k_err("failed to create driver core\n");
  2191. ret = -EINVAL;
  2192. goto err_ar_pci;
  2193. }
  2194. ar_pci->ar = ar;
  2195. atomic_set(&ar_pci->keep_awake_count, 0);
  2196. pci_set_drvdata(pdev, ar);
  2197. ret = pci_enable_device(pdev);
  2198. if (ret) {
  2199. ath10k_err("failed to enable PCI device: %d\n", ret);
  2200. goto err_ar;
  2201. }
  2202. /* Request MMIO resources */
  2203. ret = pci_request_region(pdev, BAR_NUM, "ath");
  2204. if (ret) {
  2205. ath10k_err("failed to request MMIO region: %d\n", ret);
  2206. goto err_device;
  2207. }
  2208. /*
  2209. * Target structures have a limit of 32 bit DMA pointers.
  2210. * DMA pointers can be wider than 32 bits by default on some systems.
  2211. */
  2212. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  2213. if (ret) {
  2214. ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
  2215. goto err_region;
  2216. }
  2217. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  2218. if (ret) {
  2219. ath10k_err("failed to set consistent DMA mask to 32-bit\n");
  2220. goto err_region;
  2221. }
  2222. /* Set bus master bit in PCI_COMMAND to enable DMA */
  2223. pci_set_master(pdev);
  2224. /*
  2225. * Temporary FIX: disable ASPM
  2226. * Will be removed after the OTP is programmed
  2227. */
  2228. pci_read_config_dword(pdev, 0x80, &lcr_val);
  2229. pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
  2230. /* Arrange for access to Target SoC registers. */
  2231. mem = pci_iomap(pdev, BAR_NUM, 0);
  2232. if (!mem) {
  2233. ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
  2234. ret = -EIO;
  2235. goto err_master;
  2236. }
  2237. ar_pci->mem = mem;
  2238. spin_lock_init(&ar_pci->ce_lock);
  2239. ret = ath10k_do_pci_wake(ar);
  2240. if (ret) {
  2241. ath10k_err("Failed to get chip id: %d\n", ret);
  2242. goto err_iomap;
  2243. }
  2244. chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
  2245. ath10k_do_pci_sleep(ar);
  2246. ret = ath10k_pci_alloc_ce(ar);
  2247. if (ret) {
  2248. ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
  2249. goto err_iomap;
  2250. }
  2251. ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
  2252. ret = ath10k_core_register(ar, chip_id);
  2253. if (ret) {
  2254. ath10k_err("failed to register driver core: %d\n", ret);
  2255. goto err_free_ce;
  2256. }
  2257. return 0;
  2258. err_free_ce:
  2259. ath10k_pci_free_ce(ar);
  2260. err_iomap:
  2261. pci_iounmap(pdev, mem);
  2262. err_master:
  2263. pci_clear_master(pdev);
  2264. err_region:
  2265. pci_release_region(pdev, BAR_NUM);
  2266. err_device:
  2267. pci_disable_device(pdev);
  2268. err_ar:
  2269. ath10k_core_destroy(ar);
  2270. err_ar_pci:
  2271. /* call HIF PCI free here */
  2272. kfree(ar_pci);
  2273. return ret;
  2274. }
  2275. static void ath10k_pci_remove(struct pci_dev *pdev)
  2276. {
  2277. struct ath10k *ar = pci_get_drvdata(pdev);
  2278. struct ath10k_pci *ar_pci;
  2279. ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
  2280. if (!ar)
  2281. return;
  2282. ar_pci = ath10k_pci_priv(ar);
  2283. if (!ar_pci)
  2284. return;
  2285. ath10k_core_unregister(ar);
  2286. ath10k_pci_free_ce(ar);
  2287. pci_iounmap(pdev, ar_pci->mem);
  2288. pci_release_region(pdev, BAR_NUM);
  2289. pci_clear_master(pdev);
  2290. pci_disable_device(pdev);
  2291. ath10k_core_destroy(ar);
  2292. kfree(ar_pci);
  2293. }
  2294. MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
  2295. static struct pci_driver ath10k_pci_driver = {
  2296. .name = "ath10k_pci",
  2297. .id_table = ath10k_pci_id_table,
  2298. .probe = ath10k_pci_probe,
  2299. .remove = ath10k_pci_remove,
  2300. };
  2301. static int __init ath10k_pci_init(void)
  2302. {
  2303. int ret;
  2304. ret = pci_register_driver(&ath10k_pci_driver);
  2305. if (ret)
  2306. ath10k_err("failed to register PCI driver: %d\n", ret);
  2307. return ret;
  2308. }
  2309. module_init(ath10k_pci_init);
  2310. static void __exit ath10k_pci_exit(void)
  2311. {
  2312. pci_unregister_driver(&ath10k_pci_driver);
  2313. }
  2314. module_exit(ath10k_pci_exit);
  2315. MODULE_AUTHOR("Qualcomm Atheros");
  2316. MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
  2317. MODULE_LICENSE("Dual BSD/GPL");
  2318. MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
  2319. MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);