/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long to wait for target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
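
/*
 * Host-side Copy Engine attributes, one entry per CE pipe. Each entry
 * sets the ring direction (src vs. dest entries), ring depth and the
 * maximum buffer size the host uses on that pipe.
 */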
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";

	if (ar_pci->num_msi_intrs == 1)
		return "msi";

	return "legacy";
}
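
/*
 * Post a single receive buffer to a pipe's CE destination ring. The
 * caller must hold ce_lock (note the lockdep assertion below); the skb
 * is DMA-mapped FROM_DEVICE and handed to the copy engine.
 */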
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_CB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}
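
/*
 * Timer callback armed by __ath10k_pci_rx_post_pipe() when a buffer
 * post fails; it simply retries posting rx buffers on all pipes.
 */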
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
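
/*
 * Counterpart to ath10k_pci_diag_read_mem(): copy a block of host
 * memory into the target over the diagnostic CE, bouncing through a
 * coherent DMA buffer in DIAG_TRANSFER_LIMIT-sized chunks.
 */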
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
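
/*
 * Target power-state helpers: ath10k_pci_wake() asserts SOC_WAKE and
 * polls RTC_STATE (with an increasing delay, bounded by
 * PCIE_WAKE_TIMEOUT) until the chip reports fully on;
 * ath10k_pci_sleep() releases the wake request.
 */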
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);

		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}
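
/*
 * HIF scatter-gather send: enqueue all fragments of a message on one
 * CE pipe under ce_lock. Every fragment except the last is sent with
 * CE_SEND_FLAG_GATHER; on failure the ring writes are reverted.
 */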
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after the loop above */
	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}
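
/*
 * Resolve an HTC service id to its upload (host->target) and download
 * (target->host) CE pipes via target_service_to_ce_map_wlan. Upload
 * pipes flagged CE_ATTR_DIS_INTR are reported as polled.
 */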
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val &= ~CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val |= CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	return 0;
}
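
/*
 * Shutdown helpers: revoke any receive buffers still posted to a
 * pipe's destination ring (unmapping and freeing their skbs), and
 * cancel pending sends, completing them back to the upper layer.
 */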
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_warm_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
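
/*
 * CE completion handlers used while polling for a BMI transfer to
 * finish; they mark tx_done/rx_done on the in-flight bmi_xfer.
 */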
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}
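
/*
 * Push the boot-time configuration to the target over the diagnostic
 * window: the target-side CE pipe table, the service-to-pipe map,
 * PCIe config flags (with L1 disabled), early-allocation settings, and
 * finally the "early config done" flag that lets the target proceed.
 */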
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
  1311. ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
  1312. return ret;
  1313. }
  1314. /* first bank is switched to IRAM */
  1315. ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1316. HI_EARLY_ALLOC_MAGIC_MASK);
  1317. ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
  1318. HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1319. ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
  1320. if (ret != 0) {
  1321. ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
  1322. return ret;
  1323. }
  1324. /* Tell Target to proceed with initialization */
  1325. flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
  1326. ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
  1327. if (ret != 0) {
  1328. ath10k_err(ar, "Failed to get option val: %d\n", ret);
  1329. return ret;
  1330. }
  1331. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1332. ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
  1333. if (ret != 0) {
  1334. ath10k_err(ar, "Failed to set option val: %d\n", ret);
  1335. return ret;
  1336. }
  1337. return 0;
  1338. }
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
					   ath10k_pci_ce_send_done,
					   ath10k_pci_ce_recv_data);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_COUNT - 1) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

static void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

static int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}
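
/*
 * Firmware signals a crash by setting FW_IND_EVENT_PENDING in the
 * FW_INDICATOR register; the two helpers below test and clear that bit.
 */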
static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
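
/*
 * Warm reset sequence: mask and clear pending interrupts, clear the
 * firmware indicator, stop the LF timer, reset and un-reset the copy
 * engines, clear the SI0 assert and finally warm-reset the target CPU.
 * Interrupt cause registers are dumped before and after for debugging.
 */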
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
		   val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	/* reset CE */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* unreset CE */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	ath10k_pci_warm_reset_si0(ar);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
		   val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
		   val);

	msleep(100);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err(ar, "failed to reset target: %d\n", ret);
		goto err;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);

err:
	return ret;
}
static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
{
	int i, ret;

	/*
	 * Sometimes a warm reset succeeds only after retries.
	 *
	 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
	 * on the first try.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = __ath10k_pci_hif_power_up(ar, false);
		if (ret == 0)
			break;

		ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
			    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
	}

	return ret;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	/*
	 * Hardware CUS232 version 2 has some issues with cold reset and the
	 * preferred (and safer) way to perform a device reset is through a
	 * warm reset.
	 *
	 * Warm reset doesn't always work though, so falling back to cold
	 * reset may be necessary.
	 */
	ret = ath10k_pci_hif_power_up_warm(ar);
	if (ret) {
		ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
			    ret);

		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
			return ret;

		ath10k_warn(ar, "trying cold reset\n");

		ret = __ath10k_pci_hif_power_up(ar, true);
		if (ret) {
			ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
				   ret);
			return ret;
		}
	}

	return 0;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_pci_warm_reset(ar);
}

#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44
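
/*
 * Suspend parks the device in D3hot by writing 0x3 into the low byte of
 * the PM control register at config offset 0x44; resume restores D0 and
 * the saved PCI state.
 */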
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif
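
/* HIF callbacks through which the ath10k core drives the PCI transport. */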
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
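
/* Per-pipe tasklet: service send/receive completions on a single CE. */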
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	if (!ath10k_pci_has_fw_crashed(ar)) {
		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
		return;
	}

	ath10k_pci_fw_crashed_clear(ar);
	ath10k_pci_fw_crashed_dump(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
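
/*
 * Main interrupt tasklet (legacy and single-MSI modes): dump state if the
 * firmware has crashed, otherwise service all copy engines and re-arm the
 * legacy interrupt that was masked in the hard irq handler.
 */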
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
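
/*
 * Multi-vector (MSI-X style) mode: request one interrupt for firmware
 * indications plus one per interrupt-capable CE, unwinding all previously
 * requested vectors on failure.
 */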
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn(ar, "unknown irq configuration upon request\n");
	return -EINVAL;
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's a
	 * legacy INTR, MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}
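
/*
 * Choose an interrupt mode: try the full MSI range first, then a single
 * MSI, and finally fall back to legacy interrupts, which also requires
 * enabling the interrupt sources in the target's PCIE registers.
 */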
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: the CORE_BASE write depends on the
	 * target correctly decoding the AXI address, but the host won't know
	 * when the target has written BAR to CORE_CTRL. This write might get
	 * lost if the target has NOT yet written BAR. For now, fix the race
	 * by repeating the write in the synchronization check below. */
	ar_pci->num_msi_intrs = 0;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		ath10k_pci_deinit_irq_legacy(ar);
		return 0;
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
	return -EINVAL;
}
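
/*
 * Poll the firmware indicator until the target reports FW_IND_INITIALIZED,
 * bailing out early if the device disappears (reads return 0xffffffff) or
 * the firmware crashes (FW_IND_EVENT_PENDING) before the timeout expires.
 */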
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}
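
/*
 * Cold reset: assert the SoC global reset (which also resets PCIe), wait
 * for the RTC to report the cold reset state, then deassert and wait for
 * the state to clear again.
 */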
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_cold_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		msleep(1);
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
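
/*
 * Claim the PCI device: enable it, request the register BAR, enforce
 * 32-bit DMA, disable ASPM as a workaround and map the target SoC
 * register space.
 */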
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 lcr_val;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Workaround: Disable ASPM */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
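
/*
 * Probe: create the core instance, claim and wake the device, read the
 * chip id, allocate copy engine pipes, set up interrupts and register
 * with the ath10k core.
 */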
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 chip_id;

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
				ATH10K_BUS_PCI,
				&ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;

	spin_lock_init(&ar_pci->ce_lock);
	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up: %d\n", ret);
		goto err_release;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_sleep;
	}
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_free_pipes;
	}

	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_sleep:
	ath10k_pci_sleep(ar);

err_release:
	ath10k_pci_release(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	ath10k_pci_sleep(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);