sdio.c

  1. /*
  2. * Copyright (c) 2004-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
  4. * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
  5. *
  6. * Permission to use, copy, modify, and/or distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <linux/module.h>
  19. #include <linux/mmc/card.h>
  20. #include <linux/mmc/mmc.h>
  21. #include <linux/mmc/host.h>
  22. #include <linux/mmc/sdio_func.h>
  23. #include <linux/mmc/sdio_ids.h>
  24. #include <linux/mmc/sdio.h>
  25. #include <linux/mmc/sd.h>
  26. #include <linux/bitfield.h>
  27. #include "core.h"
  28. #include "bmi.h"
  29. #include "debug.h"
  30. #include "hif.h"
  31. #include "htc.h"
  32. #include "targaddrs.h"
  33. #include "trace.h"
  34. #include "sdio.h"
  35. /* inlined helper functions */
  36. static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
  37. size_t len)
  38. {
  39. return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
  40. }
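/* A worked example of the padding above, assuming the default 256-byte
 * mbox block size (block_mask = 0xff):
 *
 *   __ALIGN_MASK(300, 0xff) == (300 + 255) & ~255 == 512
 *
 * i.e. a 300-byte HTC message is padded up to 512 bytes before it is
 * transferred over SDIO.
 */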
  41. static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
  42. {
  43. return (enum ath10k_htc_ep_id)pipe_id;
  44. }
  45. static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
  46. {
  47. dev_kfree_skb(pkt->skb);
  48. pkt->skb = NULL;
  49. pkt->alloc_len = 0;
  50. pkt->act_len = 0;
  51. pkt->trailer_only = false;
  52. }
  53. static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
  54. size_t act_len, size_t full_len,
  55. bool part_of_bundle,
  56. bool last_in_bundle)
  57. {
  58. pkt->skb = dev_alloc_skb(full_len);
  59. if (!pkt->skb)
  60. return -ENOMEM;
  61. pkt->act_len = act_len;
  62. pkt->alloc_len = full_len;
  63. pkt->part_of_bundle = part_of_bundle;
  64. pkt->last_in_bundle = last_in_bundle;
  65. pkt->trailer_only = false;
  66. return 0;
  67. }
  68. static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
  69. {
  70. bool trailer_only = false;
  71. struct ath10k_htc_hdr *htc_hdr =
  72. (struct ath10k_htc_hdr *)pkt->skb->data;
  73. u16 len = __le16_to_cpu(htc_hdr->len);
  74. if (len == htc_hdr->trailer_len)
  75. trailer_only = true;
  76. return trailer_only;
  77. }
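/* Note: a "trailer only" message is one whose HTC payload consists
 * entirely of the trailer (lookahead/credit reports), i.e.
 * htc_hdr->len == htc_hdr->trailer_len. Such packets carry no data for
 * the endpoint and are freed rather than passed to the RX completion
 * handler (see ath10k_sdio_mbox_rx_process_packets() below).
 */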
  78. /* sdio/mmc functions */
  79. static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
  80. unsigned int address,
  81. unsigned char val)
  82. {
  83. *arg = FIELD_PREP(BIT(31), write) |
  84. FIELD_PREP(BIT(27), raw) |
  85. FIELD_PREP(BIT(26), 1) |
  86. FIELD_PREP(GENMASK(25, 9), address) |
  87. FIELD_PREP(BIT(8), 1) |
  88. FIELD_PREP(GENMASK(7, 0), val);
  89. }
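/* The value built above follows the SDIO CMD52 (IO_RW_DIRECT) argument
 * layout: bit 31 selects write, bit 27 is the read-after-write (RAW)
 * flag, bits 25:9 hold the register address and bits 7:0 the data byte.
 * The function number bits (30:28) are left at zero, so the command
 * targets function 0, i.e. the card's CCCR register space.
 */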
  90. static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
  91. unsigned int address,
  92. unsigned char byte)
  93. {
  94. struct mmc_command io_cmd;
  95. memset(&io_cmd, 0, sizeof(io_cmd));
  96. ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
  97. io_cmd.opcode = SD_IO_RW_DIRECT;
  98. io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
  99. return mmc_wait_for_cmd(card->host, &io_cmd, 0);
  100. }
  101. static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
  102. unsigned int address,
  103. unsigned char *byte)
  104. {
  105. struct mmc_command io_cmd;
  106. int ret;
  107. memset(&io_cmd, 0, sizeof(io_cmd));
  108. ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
  109. io_cmd.opcode = SD_IO_RW_DIRECT;
  110. io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
  111. ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
  112. if (!ret)
  113. *byte = io_cmd.resp[0];
  114. return ret;
  115. }
  116. static int ath10k_sdio_config(struct ath10k *ar)
  117. {
  118. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  119. struct sdio_func *func = ar_sdio->func;
  120. unsigned char byte, asyncintdelay = 2;
  121. int ret;
  122. ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
  123. sdio_claim_host(func);
  124. byte = 0;
  125. ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
  126. SDIO_CCCR_DRIVE_STRENGTH,
  127. &byte);
  128. byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
  129. byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
  130. ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
  131. ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
  132. SDIO_CCCR_DRIVE_STRENGTH,
  133. byte);
  134. byte = 0;
  135. ret = ath10k_sdio_func0_cmd52_rd_byte(
  136. func->card,
  137. CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
  138. &byte);
  139. byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
  140. CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
  141. CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
  142. ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
  143. CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
  144. byte);
  145. if (ret) {
  146. ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
  147. goto out;
  148. }
  149. byte = 0;
  150. ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
  151. CCCR_SDIO_IRQ_MODE_REG_SDIO3,
  152. &byte);
  153. byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
  154. ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
  155. CCCR_SDIO_IRQ_MODE_REG_SDIO3,
  156. byte);
  157. if (ret) {
  158. ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
  159. ret);
  160. goto out;
  161. }
  162. byte = 0;
  163. ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
  164. CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
  165. &byte);
  166. byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
  167. byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
  168. ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
  169. CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
  170. byte);
  171. /* give us some time to enable, in ms */
  172. func->enable_timeout = 100;
  173. ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
  174. if (ret) {
  175. ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
  176. ar_sdio->mbox_info.block_size, ret);
  177. goto out;
  178. }
  179. out:
  180. sdio_release_host(func);
  181. return ret;
  182. }
  183. static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
  184. {
  185. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  186. struct sdio_func *func = ar_sdio->func;
  187. int ret;
  188. sdio_claim_host(func);
  189. sdio_writel(func, val, addr, &ret);
  190. if (ret) {
  191. ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
  192. val, addr, ret);
  193. goto out;
  194. }
  195. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
  196. addr, val);
  197. out:
  198. sdio_release_host(func);
  199. return ret;
  200. }
  201. static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
  202. {
  203. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  204. struct sdio_func *func = ar_sdio->func;
  205. __le32 *buf;
  206. int ret;
  207. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  208. if (!buf)
  209. return -ENOMEM;
  210. *buf = cpu_to_le32(val);
  211. sdio_claim_host(func);
  212. ret = sdio_writesb(func, addr, buf, sizeof(*buf));
  213. if (ret) {
  214. ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
  215. val, addr, ret);
  216. goto out;
  217. }
  218. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
  219. addr, val);
  220. out:
  221. sdio_release_host(func);
  222. kfree(buf);
  223. return ret;
  224. }
  225. static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
  226. {
  227. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  228. struct sdio_func *func = ar_sdio->func;
  229. int ret;
  230. sdio_claim_host(func);
  231. *val = sdio_readl(func, addr, &ret);
  232. if (ret) {
  233. ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
  234. addr, ret);
  235. goto out;
  236. }
  237. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
  238. addr, *val);
  239. out:
  240. sdio_release_host(func);
  241. return ret;
  242. }
  243. static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
  244. {
  245. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  246. struct sdio_func *func = ar_sdio->func;
  247. int ret;
  248. sdio_claim_host(func);
  249. ret = sdio_memcpy_fromio(func, buf, addr, len);
  250. if (ret) {
  251. ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
  252. addr, ret);
  253. goto out;
  254. }
  255. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
  256. addr, buf, len);
  257. ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
  258. out:
  259. sdio_release_host(func);
  260. return ret;
  261. }
  262. static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
  263. {
  264. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  265. struct sdio_func *func = ar_sdio->func;
  266. int ret;
  267. sdio_claim_host(func);
  268. /* For some reason toio() doesn't have const for the buffer, need
  269. * an ugly hack to work around that.
  270. */
  271. ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
  272. if (ret) {
  273. ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
  274. addr, ret);
  275. goto out;
  276. }
  277. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
  278. addr, buf, len);
  279. ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
  280. out:
  281. sdio_release_host(func);
  282. return ret;
  283. }
  284. static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
  285. {
  286. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  287. struct sdio_func *func = ar_sdio->func;
  288. int ret;
  289. sdio_claim_host(func);
  290. len = round_down(len, ar_sdio->mbox_info.block_size);
  291. ret = sdio_readsb(func, buf, addr, len);
  292. if (ret) {
  293. ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
  294. addr, ret);
  295. goto out;
  296. }
  297. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
  298. addr, buf, len);
  299. ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
  300. out:
  301. sdio_release_host(func);
  302. return ret;
  303. }
  304. /* HIF mbox functions */
  305. static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
  306. struct ath10k_sdio_rx_data *pkt,
  307. u32 *lookaheads,
  308. int *n_lookaheads)
  309. {
  310. struct ath10k_htc *htc = &ar->htc;
  311. struct sk_buff *skb = pkt->skb;
  312. struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
  313. bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
  314. enum ath10k_htc_ep_id eid;
  315. u16 payload_len;
  316. u8 *trailer;
  317. int ret;
  318. payload_len = le16_to_cpu(htc_hdr->len);
  319. if (trailer_present) {
  320. trailer = skb->data + sizeof(*htc_hdr) +
  321. payload_len - htc_hdr->trailer_len;
  322. eid = pipe_id_to_eid(htc_hdr->eid);
  323. ret = ath10k_htc_process_trailer(htc,
  324. trailer,
  325. htc_hdr->trailer_len,
  326. eid,
  327. lookaheads,
  328. n_lookaheads);
  329. if (ret)
  330. return ret;
  331. if (is_trailer_only_msg(pkt))
  332. pkt->trailer_only = true;
  333. skb_trim(skb, skb->len - htc_hdr->trailer_len);
  334. }
  335. skb_pull(skb, sizeof(*htc_hdr));
  336. return 0;
  337. }
  338. static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
  339. u32 lookaheads[],
  340. int *n_lookahead)
  341. {
  342. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  343. struct ath10k_htc *htc = &ar->htc;
  344. struct ath10k_sdio_rx_data *pkt;
  345. struct ath10k_htc_ep *ep;
  346. enum ath10k_htc_ep_id id;
  347. int ret, i, *n_lookahead_local;
  348. u32 *lookaheads_local;
  349. for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
  350. lookaheads_local = lookaheads;
  351. n_lookahead_local = n_lookahead;
  352. id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
  353. if (id >= ATH10K_HTC_EP_COUNT) {
  354. ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
  355. id);
  356. ret = -ENOMEM;
  357. goto out;
  358. }
  359. ep = &htc->endpoint[id];
  360. if (ep->service_id == 0) {
  361. ath10k_warn(ar, "ep %d is not connected\n", id);
  362. ret = -ENOMEM;
  363. goto out;
  364. }
  365. pkt = &ar_sdio->rx_pkts[i];
  366. if (pkt->part_of_bundle && !pkt->last_in_bundle) {
  367. /* Only read lookaheads from RX trailers
  368. * for the last packet in a bundle.
  369. */
  370. lookaheads_local = NULL;
  371. n_lookahead_local = NULL;
  372. }
  373. ret = ath10k_sdio_mbox_rx_process_packet(ar,
  374. pkt,
  375. lookaheads_local,
  376. n_lookahead_local);
  377. if (ret)
  378. goto out;
  379. if (!pkt->trailer_only)
  380. ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
  381. else
  382. kfree_skb(pkt->skb);
  383. /* The RX complete handler now owns the skb...*/
  384. pkt->skb = NULL;
  385. pkt->alloc_len = 0;
  386. }
  387. ret = 0;
  388. out:
  389. /* Free all packets that were not passed on to the RX completion
  390. * handler...
  391. */
  392. for (; i < ar_sdio->n_rx_pkts; i++)
  393. ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
  394. return ret;
  395. }
  396. static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
  397. struct ath10k_sdio_rx_data *rx_pkts,
  398. struct ath10k_htc_hdr *htc_hdr,
  399. size_t full_len, size_t act_len,
  400. size_t *bndl_cnt)
  401. {
  402. int ret, i;
  403. *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
  404. if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
  405. ath10k_warn(ar,
  406. "HTC bundle length %u exceeds maximum %u\n",
  407. le16_to_cpu(htc_hdr->len),
  408. HTC_HOST_MAX_MSG_PER_BUNDLE);
  409. return -ENOMEM;
  410. }
  411. /* Allocate bndl_cnt extra skbs for the bundle.
  412. * The packet containing the
  413. * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
  414. * in bndl_cnt. The skb for that packet will be
  415. * allocated separately.
  416. */
  417. for (i = 0; i < *bndl_cnt; i++) {
  418. ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
  419. act_len,
  420. full_len,
  421. true,
  422. false);
  423. if (ret)
  424. return ret;
  425. }
  426. return 0;
  427. }
  428. static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
  429. u32 lookaheads[], int n_lookaheads)
  430. {
  431. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  432. struct ath10k_htc_hdr *htc_hdr;
  433. size_t full_len, act_len;
  434. bool last_in_bundle;
  435. int ret, i;
  436. if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
  437. ath10k_warn(ar,
  438. "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
  439. n_lookaheads,
  440. ATH10K_SDIO_MAX_RX_MSGS);
  441. ret = -ENOMEM;
  442. goto err;
  443. }
  444. for (i = 0; i < n_lookaheads; i++) {
  445. htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
  446. last_in_bundle = false;
  447. if (le16_to_cpu(htc_hdr->len) >
  448. ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
  449. ath10k_warn(ar,
  450. "payload length %d exceeds max htc length: %zu\n",
  451. le16_to_cpu(htc_hdr->len),
  452. ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
  453. ret = -ENOMEM;
  454. goto err;
  455. }
  456. act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
  457. full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
  458. if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
  459. ath10k_warn(ar,
  460. "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
  461. htc_hdr->eid, htc_hdr->flags,
  462. le16_to_cpu(htc_hdr->len));
  463. ret = -EINVAL;
  464. goto err;
  465. }
  466. if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
  467. /* HTC header indicates that every packet to follow
  468. * has the same padded length so that it can be
  469. * optimally fetched as a full bundle.
  470. */
  471. size_t bndl_cnt;
  472. ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
  473. &ar_sdio->rx_pkts[i],
  474. htc_hdr,
  475. full_len,
  476. act_len,
  477. &bndl_cnt);
  478. n_lookaheads += bndl_cnt;
  479. i += bndl_cnt;
  480. /* Next buffer will be the last in the bundle */
  481. last_in_bundle = true;
  482. }
  483. /* Allocate skb for packet. If the packet had the
  484. * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
  485. * packet skb's have been allocated in the previous step.
  486. */
  487. ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
  488. act_len,
  489. full_len,
  490. last_in_bundle,
  491. last_in_bundle);
  492. }
  493. ar_sdio->n_rx_pkts = i;
  494. return 0;
  495. err:
  496. for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
  497. if (!ar_sdio->rx_pkts[i].alloc_len)
  498. break;
  499. ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
  500. }
  501. return ret;
  502. }
  503. static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
  504. struct ath10k_sdio_rx_data *pkt)
  505. {
  506. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  507. struct sk_buff *skb = pkt->skb;
  508. int ret;
  509. ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
  510. skb->data, pkt->alloc_len);
  511. pkt->status = ret;
  512. if (!ret)
  513. skb_put(skb, pkt->act_len);
  514. return ret;
  515. }
  516. static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
  517. {
  518. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  519. int ret, i;
  520. for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
  521. ret = ath10k_sdio_mbox_rx_packet(ar,
  522. &ar_sdio->rx_pkts[i]);
  523. if (ret)
  524. goto err;
  525. }
  526. return 0;
  527. err:
  528. /* Free all packets that were not successfully fetched. */
  529. for (; i < ar_sdio->n_rx_pkts; i++)
  530. ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
  531. return ret;
  532. }
  533. /* This is the timeout for mailbox processing done in the sdio irq
  534. * handler. The timeout is deliberately set quite high since SDIO dump logs
  535. * over serial port can/will add a substantial overhead to the processing
  536. * (if enabled).
  537. */
  538. #define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
  539. static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
  540. u32 msg_lookahead, bool *done)
  541. {
  542. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  543. u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
  544. int n_lookaheads = 1;
  545. unsigned long timeout;
  546. int ret;
  547. *done = true;
  548. /* Copy the lookahead obtained from the HTC register table into our
  549. * temp array as a start value.
  550. */
  551. lookaheads[0] = msg_lookahead;
  552. timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
  553. do {
  554. /* Try to allocate as many HTC RX packets as indicated by
  555. * n_lookaheads.
  556. */
  557. ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
  558. n_lookaheads);
  559. if (ret)
  560. break;
  561. if (ar_sdio->n_rx_pkts >= 2)
  562. /* A recv bundle was detected, force IRQ status
  563. * re-check again.
  564. */
  565. *done = false;
  566. ret = ath10k_sdio_mbox_rx_fetch(ar);
  567. /* Process fetched packets. This will potentially update
  568. * n_lookaheads depending on if the packets contain lookahead
  569. * reports.
  570. */
  571. n_lookaheads = 0;
  572. ret = ath10k_sdio_mbox_rx_process_packets(ar,
  573. lookaheads,
  574. &n_lookaheads);
  575. if (!n_lookaheads || ret)
  576. break;
  577. /* For SYNCH processing, if we get here, we are running
  578. * through the loop again due to updated lookaheads. Set
  579. * the flag that we should re-check IRQ status registers again
  580. * before leaving IRQ processing; this can net better
  581. * performance in high throughput situations.
  582. */
  583. *done = false;
  584. } while (time_before(jiffies, timeout));
  585. if (ret && (ret != -ECANCELED))
  586. ath10k_warn(ar, "failed to get pending recv messages: %d\n",
  587. ret);
  588. return ret;
  589. }
  590. static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
  591. {
  592. u32 val;
  593. int ret;
  594. /* TODO: Add firmware crash handling */
  595. ath10k_warn(ar, "firmware crashed\n");
  596. /* Read the counter to clear the interrupt; the debug error interrupt
  597. * is counter 0.
  598. */
  599. ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
  600. if (ret)
  601. ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
  602. return ret;
  603. }
  604. static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
  605. {
  606. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  607. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  608. u8 counter_int_status;
  609. int ret;
  610. mutex_lock(&irq_data->mtx);
  611. counter_int_status = irq_data->irq_proc_reg->counter_int_status &
  612. irq_data->irq_en_reg->cntr_int_status_en;
  613. /* NOTE: other modules like GMBOX may use the counter interrupt for
  614. * credit flow control on other counters; we only need to check for
  615. * the debug assertion counter interrupt.
  616. */
  617. if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
  618. ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
  619. else
  620. ret = 0;
  621. mutex_unlock(&irq_data->mtx);
  622. return ret;
  623. }
  624. static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
  625. {
  626. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  627. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  628. u8 error_int_status;
  629. int ret;
  630. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
  631. error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
  632. if (!error_int_status) {
  633. ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
  634. error_int_status);
  635. return -EIO;
  636. }
  637. ath10k_dbg(ar, ATH10K_DBG_SDIO,
  638. "sdio error_int_status 0x%x\n", error_int_status);
  639. if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
  640. error_int_status))
  641. ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
  642. if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
  643. error_int_status))
  644. ath10k_warn(ar, "rx underflow interrupt error\n");
  645. if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
  646. error_int_status))
  647. ath10k_warn(ar, "tx overflow interrupt error\n");
  648. /* Clear the interrupt */
  649. irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
  650. /* Set W1C value to clear the interrupt; this hits the register first */
  651. ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
  652. error_int_status);
  653. if (ret) {
  654. ath10k_warn(ar, "unable to write to error int status address: %d\n",
  655. ret);
  656. return ret;
  657. }
  658. return 0;
  659. }
  660. static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
  661. {
  662. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  663. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  664. u8 cpu_int_status;
  665. int ret;
  666. mutex_lock(&irq_data->mtx);
  667. cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
  668. irq_data->irq_en_reg->cpu_int_status_en;
  669. if (!cpu_int_status) {
  670. ath10k_warn(ar, "CPU interrupt status is zero\n");
  671. ret = -EIO;
  672. goto out;
  673. }
  674. /* Clear the interrupt */
  675. irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
  676. /* Set up the register transfer buffer to hit the register 4 times;
  677. * this is done to make the access 4-byte aligned to mitigate issues
  678. * with host bus interconnects that restrict bus transfer lengths to
  679. * be a multiple of 4 bytes.
  680. *
  681. * Set W1C value to clear the interrupt; this hits the register first.
  682. */
  683. ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
  684. cpu_int_status);
  685. if (ret) {
  686. ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
  687. ret);
  688. goto out;
  689. }
  690. out:
  691. mutex_unlock(&irq_data->mtx);
  692. return ret;
  693. }
  694. static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
  695. u8 *host_int_status,
  696. u32 *lookahead)
  697. {
  698. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  699. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  700. struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
  701. struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
  702. u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
  703. int ret;
  704. mutex_lock(&irq_data->mtx);
  705. *lookahead = 0;
  706. *host_int_status = 0;
  707. /* int_status_en is supposed to be non-zero, otherwise interrupts
  708. * shouldn't be enabled. There is however a short time frame during
  709. * initialization, between irq registration and int_status_en init,
  710. * where this can happen.
  711. * We silently ignore this condition.
  712. */
  713. if (!irq_en_reg->int_status_en) {
  714. ret = 0;
  715. goto out;
  716. }
  717. /* Read the first sizeof(struct ath10k_irq_proc_registers)
  718. * bytes of the HTC register table. This will yield the values
  719. * of the different int status registers and the lookahead
  720. * registers.
  721. */
  722. ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
  723. irq_proc_reg, sizeof(*irq_proc_reg));
  724. if (ret)
  725. goto out;
  726. /* Update only those registers that are enabled */
  727. *host_int_status = irq_proc_reg->host_int_status &
  728. irq_en_reg->int_status_en;
  729. /* Look at mbox status */
  730. if (!(*host_int_status & htc_mbox)) {
  731. *lookahead = 0;
  732. ret = 0;
  733. goto out;
  734. }
  735. /* Mask out the pending mbox value; we use the lookahead as
  736. * the real flag for mbox processing.
  737. */
  738. *host_int_status &= ~htc_mbox;
  739. if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
  740. *lookahead = le32_to_cpu(
  741. irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
  742. if (!*lookahead)
  743. ath10k_warn(ar, "sdio mbox lookahead is zero\n");
  744. }
  745. out:
  746. mutex_unlock(&irq_data->mtx);
  747. return ret;
  748. }
  749. static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
  750. bool *done)
  751. {
  752. u8 host_int_status;
  753. u32 lookahead;
  754. int ret;
  755. /* NOTE: HIF implementation guarantees that the context of this
  756. * call allows us to perform SYNCHRONOUS I/O, that is we can block,
  757. * sleep or call any API that can block or switch thread/task
  758. * contexts. This is a fully schedulable context.
  759. */
  760. ret = ath10k_sdio_mbox_read_int_status(ar,
  761. &host_int_status,
  762. &lookahead);
  763. if (ret) {
  764. *done = true;
  765. goto out;
  766. }
  767. if (!host_int_status && !lookahead) {
  768. ret = 0;
  769. *done = true;
  770. goto out;
  771. }
  772. if (lookahead) {
  773. ath10k_dbg(ar, ATH10K_DBG_SDIO,
  774. "sdio pending mailbox msg lookahead 0x%08x\n",
  775. lookahead);
  776. ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
  777. lookahead,
  778. done);
  779. if (ret)
  780. goto out;
  781. }
  782. /* now, handle the rest of the interrupts */
  783. ath10k_dbg(ar, ATH10K_DBG_SDIO,
  784. "sdio host_int_status 0x%x\n", host_int_status);
  785. if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
  786. /* CPU Interrupt */
  787. ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
  788. if (ret)
  789. goto out;
  790. }
  791. if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
  792. /* Error Interrupt */
  793. ret = ath10k_sdio_mbox_proc_err_intr(ar);
  794. if (ret)
  795. goto out;
  796. }
  797. if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
  798. /* Counter Interrupt */
  799. ret = ath10k_sdio_mbox_proc_counter_intr(ar);
  800. ret = 0;
  801. out:
  802. /* An optimization to bypass reading the IRQ status registers
  803. * unnecessarily, which can re-wake the target: if upper layers
  804. * determine that we are in a low-throughput mode, we can rely on
  805. * taking another interrupt rather than re-checking the status
  806. * registers which can re-wake the target.
  807. *
  808. * NOTE: host interfaces that rely on detecting pending mbox
  809. * messages at the HIF level cannot use this optimization due to
  810. * possible side effects; SPI requires the host to drain all
  811. * messages from the mailbox before exiting the ISR routine.
  812. */
  813. ath10k_dbg(ar, ATH10K_DBG_SDIO,
  814. "sdio pending irqs done %d status %d",
  815. *done, ret);
  816. return ret;
  817. }
  818. static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
  819. {
  820. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  821. struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
  822. u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
  823. mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
  824. mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
  825. mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
  826. mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
  827. mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
  828. mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
  829. dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
  830. dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
  831. switch (dev_id_base) {
  832. case QCA_MANUFACTURER_ID_AR6005_BASE:
  833. if (dev_id_chiprev < 4)
  834. mbox_info->ext_info[0].htc_ext_sz =
  835. ATH10K_HIF_MBOX0_EXT_WIDTH;
  836. else
  837. /* from QCA6174 2.0(0x504), the width has been extended
  838. * to 56K
  839. */
  840. mbox_info->ext_info[0].htc_ext_sz =
  841. ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
  842. break;
  843. case QCA_MANUFACTURER_ID_QCA9377_BASE:
  844. mbox_info->ext_info[0].htc_ext_sz =
  845. ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
  846. break;
  847. default:
  848. mbox_info->ext_info[0].htc_ext_sz =
  849. ATH10K_HIF_MBOX0_EXT_WIDTH;
  850. }
  851. mbox_info->ext_info[1].htc_ext_addr =
  852. mbox_info->ext_info[0].htc_ext_addr +
  853. mbox_info->ext_info[0].htc_ext_sz +
  854. ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
  855. mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
  856. }
  857. /* BMI functions */
  858. static int ath10k_sdio_bmi_credits(struct ath10k *ar)
  859. {
  860. u32 addr, cmd_credits;
  861. unsigned long timeout;
  862. int ret;
  863. /* Read the counter register to get the command credits */
  864. addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
  865. timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
  866. cmd_credits = 0;
  867. while (time_before(jiffies, timeout) && !cmd_credits) {
  868. /* Hit the credit counter with a 4-byte access; the first byte
  869. * read will hit the counter and cause a decrement, while the
  870. * remaining 3 bytes have no effect. The rationale behind this
  871. * is to make all HIF accesses 4-byte aligned.
  872. */
  873. ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
  874. if (ret) {
  875. ath10k_warn(ar,
  876. "unable to decrement the command credit count register: %d\n",
  877. ret);
  878. return ret;
  879. }
  880. /* The counter is only 8 bits.
  881. * Ignore anything in the upper 3 bytes
  882. */
  883. cmd_credits &= 0xFF;
  884. }
  885. if (!cmd_credits) {
  886. ath10k_warn(ar, "bmi communication timeout\n");
  887. return -ETIMEDOUT;
  888. }
  889. return 0;
  890. }
  891. static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
  892. {
  893. unsigned long timeout;
  894. u32 rx_word;
  895. int ret;
  896. timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
  897. rx_word = 0;
  898. while ((time_before(jiffies, timeout)) && !rx_word) {
  899. ret = ath10k_sdio_read32(ar,
  900. MBOX_HOST_INT_STATUS_ADDRESS,
  901. &rx_word);
  902. if (ret) {
  903. ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
  904. return ret;
  905. }
  906. /* all we really want is one bit */
  907. rx_word &= 1;
  908. }
  909. if (!rx_word) {
  910. ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
  911. return -EINVAL;
  912. }
  913. return ret;
  914. }
  915. static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
  916. void *req, u32 req_len,
  917. void *resp, u32 *resp_len)
  918. {
  919. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  920. u32 addr;
  921. int ret;
  922. if (req) {
  923. ret = ath10k_sdio_bmi_credits(ar);
  924. if (ret)
  925. return ret;
  926. addr = ar_sdio->mbox_info.htc_addr;
  927. memcpy(ar_sdio->bmi_buf, req, req_len);
  928. ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
  929. if (ret) {
  930. ath10k_warn(ar,
  931. "unable to send the bmi data to the device: %d\n",
  932. ret);
  933. return ret;
  934. }
  935. }
  936. if (!resp || !resp_len)
  937. /* No response expected */
  938. return 0;
  939. /* During normal bootup, small reads may be required.
  940. * Rather than issue an HIF Read and then wait as the Target
  941. * adds successive bytes to the FIFO, we wait here until
  942. * we know that response data is available.
  943. *
  944. * This allows us to cleanly timeout on an unexpected
  945. * Target failure rather than risk problems at the HIF level.
  946. * In particular, this avoids SDIO timeouts and possibly garbage
  947. * data on some host controllers. And on an interconnect
  948. * such as Compact Flash (as well as some SDIO masters) which
  949. * does not provide any indication on data timeout, it avoids
  950. * a potential hang or garbage response.
  951. *
  952. * Synchronization is more difficult for reads larger than the
  953. * size of the MBOX FIFO (128B), because the Target is unable
  954. * to push the 129th byte of data until AFTER the Host posts an
  955. * HIF Read and removes some FIFO data. So for large reads the
  956. * Host proceeds to post an HIF Read BEFORE all the data is
  957. * actually available to read. Fortunately, large BMI reads do
  958. * not occur in practice -- they're supported for debug/development.
  959. *
  960. * So Host/Target BMI synchronization is divided into these cases:
  961. * CASE 1: length < 4
  962. * Should not happen
  963. *
  964. * CASE 2: 4 <= length <= 128
  965. * Wait for first 4 bytes to be in FIFO
  966. * If CONSERVATIVE_BMI_READ is enabled, also wait for
  967. * a BMI command credit, which indicates that the ENTIRE
  968. * response is available in the FIFO
  969. *
  970. * CASE 3: length > 128
  971. * Wait for the first 4 bytes to be in FIFO
  972. *
  973. * For most uses, a small timeout should be sufficient and we will
  974. * usually see a response quickly; but there may be some unusual
  975. * (debug) cases of BMI_EXECUTE where we want a larger timeout.
  976. * For now, we use an unbounded busy loop while waiting for
  977. * BMI_EXECUTE.
  978. *
  979. * If BMI_EXECUTE ever needs to support longer-latency execution,
  980. * especially in production, this code needs to be enhanced to sleep
  981. * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
  982. * a function of Host processor speed.
  983. */
  984. ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
  985. if (ret)
  986. return ret;
  987. /* We always read from the start of the mbox address */
  988. addr = ar_sdio->mbox_info.htc_addr;
  989. ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
  990. if (ret) {
  991. ath10k_warn(ar,
  992. "unable to read the bmi data from the device: %d\n",
  993. ret);
  994. return ret;
  995. }
  996. memcpy(resp, ar_sdio->bmi_buf, *resp_len);
  997. return 0;
  998. }
  999. /* sdio async handling functions */
  1000. static struct ath10k_sdio_bus_request
  1001. *ath10k_sdio_alloc_busreq(struct ath10k *ar)
  1002. {
  1003. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1004. struct ath10k_sdio_bus_request *bus_req;
  1005. spin_lock_bh(&ar_sdio->lock);
  1006. if (list_empty(&ar_sdio->bus_req_freeq)) {
  1007. bus_req = NULL;
  1008. goto out;
  1009. }
  1010. bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
  1011. struct ath10k_sdio_bus_request, list);
  1012. list_del(&bus_req->list);
  1013. out:
  1014. spin_unlock_bh(&ar_sdio->lock);
  1015. return bus_req;
  1016. }
  1017. static void ath10k_sdio_free_bus_req(struct ath10k *ar,
  1018. struct ath10k_sdio_bus_request *bus_req)
  1019. {
  1020. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1021. memset(bus_req, 0, sizeof(*bus_req));
  1022. spin_lock_bh(&ar_sdio->lock);
  1023. list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
  1024. spin_unlock_bh(&ar_sdio->lock);
  1025. }
  1026. static void __ath10k_sdio_write_async(struct ath10k *ar,
  1027. struct ath10k_sdio_bus_request *req)
  1028. {
  1029. struct ath10k_htc_ep *ep;
  1030. struct sk_buff *skb;
  1031. int ret;
  1032. skb = req->skb;
  1033. ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
  1034. if (ret)
  1035. ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
  1036. req->address, ret);
  1037. if (req->htc_msg) {
  1038. ep = &ar->htc.endpoint[req->eid];
  1039. ath10k_htc_notify_tx_completion(ep, skb);
  1040. } else if (req->comp) {
  1041. complete(req->comp);
  1042. }
  1043. ath10k_sdio_free_bus_req(ar, req);
  1044. }
  1045. static void ath10k_sdio_write_async_work(struct work_struct *work)
  1046. {
  1047. struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
  1048. wr_async_work);
  1049. struct ath10k *ar = ar_sdio->ar;
  1050. struct ath10k_sdio_bus_request *req, *tmp_req;
  1051. spin_lock_bh(&ar_sdio->wr_async_lock);
  1052. list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
  1053. list_del(&req->list);
  1054. spin_unlock_bh(&ar_sdio->wr_async_lock);
  1055. __ath10k_sdio_write_async(ar, req);
  1056. spin_lock_bh(&ar_sdio->wr_async_lock);
  1057. }
  1058. spin_unlock_bh(&ar_sdio->wr_async_lock);
  1059. }
  1060. static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
  1061. struct sk_buff *skb,
  1062. struct completion *comp,
  1063. bool htc_msg, enum ath10k_htc_ep_id eid)
  1064. {
  1065. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1066. struct ath10k_sdio_bus_request *bus_req;
  1067. /* Allocate a bus request for the message and queue it on the
  1068. * SDIO workqueue.
  1069. */
  1070. bus_req = ath10k_sdio_alloc_busreq(ar);
  1071. if (!bus_req) {
  1072. ath10k_warn(ar,
  1073. "unable to allocate bus request for async request\n");
  1074. return -ENOMEM;
  1075. }
  1076. bus_req->skb = skb;
  1077. bus_req->eid = eid;
  1078. bus_req->address = addr;
  1079. bus_req->htc_msg = htc_msg;
  1080. bus_req->comp = comp;
  1081. spin_lock_bh(&ar_sdio->wr_async_lock);
  1082. list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
  1083. spin_unlock_bh(&ar_sdio->wr_async_lock);
  1084. return 0;
  1085. }
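/* Requests queued by ath10k_sdio_prep_async_req() are drained by
 * ath10k_sdio_write_async_work() on the SDIO workqueue. Completion is
 * then signalled either through the HTC TX completion callback (when
 * htc_msg is set) or through the optional completion struct supplied
 * by the caller.
 */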
  1086. /* IRQ handler */
  1087. static void ath10k_sdio_irq_handler(struct sdio_func *func)
  1088. {
  1089. struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
  1090. struct ath10k *ar = ar_sdio->ar;
  1091. unsigned long timeout;
  1092. bool done = false;
  1093. int ret;
  1094. /* Release the host during interrupts so we can pick it back up when
  1095. * we process commands.
  1096. */
  1097. sdio_release_host(ar_sdio->func);
  1098. timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
  1099. do {
  1100. ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
  1101. if (ret)
  1102. break;
  1103. } while (time_before(jiffies, timeout) && !done);
  1104. sdio_claim_host(ar_sdio->func);
  1105. if (ret && ret != -ECANCELED)
  1106. ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
  1107. ret);
  1108. }
  1109. /* sdio HIF functions */
  1110. static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
  1111. {
  1112. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1113. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  1114. struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
  1115. int ret;
  1116. mutex_lock(&irq_data->mtx);
  1117. memset(regs, 0, sizeof(*regs));
  1118. ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
  1119. &regs->int_status_en, sizeof(*regs));
  1120. if (ret)
  1121. ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
  1122. mutex_unlock(&irq_data->mtx);
  1123. return ret;
  1124. }
  1125. static int ath10k_sdio_hif_power_up(struct ath10k *ar)
  1126. {
  1127. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1128. struct sdio_func *func = ar_sdio->func;
  1129. int ret;
  1130. if (!ar_sdio->is_disabled)
  1131. return 0;
  1132. ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
  1133. sdio_claim_host(func);
  1134. ret = sdio_enable_func(func);
  1135. if (ret) {
  1136. ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
  1137. sdio_release_host(func);
  1138. return ret;
  1139. }
  1140. sdio_release_host(func);
  1141. /* Wait for hardware to initialise. It should take a lot less than
  1142. * 20 ms but let's be conservative here.
  1143. */
  1144. msleep(20);
  1145. ar_sdio->is_disabled = false;
  1146. ret = ath10k_sdio_hif_disable_intrs(ar);
  1147. if (ret)
  1148. return ret;
  1149. return 0;
  1150. }
  1151. static void ath10k_sdio_hif_power_down(struct ath10k *ar)
  1152. {
  1153. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1154. int ret;
  1155. if (ar_sdio->is_disabled)
  1156. return;
  1157. ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
  1158. /* Disable the card */
  1159. sdio_claim_host(ar_sdio->func);
  1160. ret = sdio_disable_func(ar_sdio->func);
  1161. sdio_release_host(ar_sdio->func);
  1162. if (ret)
  1163. ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
  1164. ar_sdio->is_disabled = true;
  1165. }
  1166. static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
  1167. struct ath10k_hif_sg_item *items, int n_items)
  1168. {
  1169. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1170. enum ath10k_htc_ep_id eid;
  1171. struct sk_buff *skb;
  1172. int ret, i;
  1173. eid = pipe_id_to_eid(pipe_id);
  1174. for (i = 0; i < n_items; i++) {
  1175. size_t padded_len;
  1176. u32 address;
  1177. skb = items[i].transfer_context;
  1178. padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
  1179. skb->len);
  1180. skb_trim(skb, padded_len);
  1181. /* Write TX data to the end of the mbox address space */
  1182. address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
  1183. skb->len;
  1184. ret = ath10k_sdio_prep_async_req(ar, address, skb,
  1185. NULL, true, eid);
  1186. if (ret)
  1187. return ret;
  1188. }
  1189. queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
  1190. return 0;
  1191. }
  1192. static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
  1193. {
  1194. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1195. struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
  1196. struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
  1197. int ret;
  1198. mutex_lock(&irq_data->mtx);
  1199. /* Enable all but CPU interrupts */
  1200. regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
  1201. FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
  1202. FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
  1203. /* NOTE: There are some cases where HIF can do detection of
  1204. * pending mbox messages which is disabled now.
  1205. */
  1206. regs->int_status_en |=
  1207. FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
  1208. /* Set up the CPU Interrupt status Register */
  1209. regs->cpu_int_status_en = 0;
  1210. /* Set up the Error Interrupt status Register */
  1211. regs->err_int_status_en =
  1212. FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
  1213. FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
  1214. /* Enable Counter interrupt status register to get fatal errors for
  1215. * debugging.
  1216. */
  1217. regs->cntr_int_status_en =
  1218. FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
  1219. ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
  1220. ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
  1221. &regs->int_status_en, sizeof(*regs));
  1222. if (ret)
  1223. ath10k_warn(ar,
  1224. "failed to update mbox interrupt status register : %d\n",
  1225. ret);
  1226. mutex_unlock(&irq_data->mtx);
  1227. return ret;
  1228. }
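/* Note: the interrupt enable registers are assumed to be laid out
 * contiguously in the target's mbox register space, mirroring struct
 * ath10k_sdio_irq_enable_regs. That is why the whole block is written
 * in a single transfer starting at MBOX_INT_STATUS_ENABLE_ADDRESS,
 * passing &regs->int_status_en together with sizeof(*regs).
 */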
  1229. static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
  1230. {
  1231. u32 val;
  1232. int ret;
  1233. ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
  1234. if (ret) {
  1235. ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
  1236. ret);
  1237. return ret;
  1238. }
  1239. if (enable_sleep)
  1240. val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
  1241. else
  1242. val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
  1243. ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
  1244. if (ret) {
  1245. ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
  1246. ret);
  1247. return ret;
  1248. }
  1249. return 0;
  1250. }
  1251. /* HIF diagnostics */
  1252. static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
  1253. size_t buf_len)
  1254. {
  1255. int ret;
  1256. /* set window register to start read cycle */
  1257. ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
  1258. if (ret) {
  1259. ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
  1260. return ret;
  1261. }
  1262. /* read the data */
  1263. ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
  1264. if (ret) {
  1265. ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
  1266. ret);
  1267. return ret;
  1268. }
  1269. return 0;
  1270. }
  1271. static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
  1272. u32 *value)
  1273. {
  1274. __le32 *val;
  1275. int ret;
  1276. val = kzalloc(sizeof(*val), GFP_KERNEL);
  1277. if (!val)
  1278. return -ENOMEM;
  1279. ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
  1280. if (ret)
  1281. goto out;
  1282. *value = __le32_to_cpu(*val);
  1283. out:
  1284. kfree(val);
  1285. return ret;
  1286. }
  1287. static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
  1288. const void *data, int nbytes)
  1289. {
  1290. int ret;
  1291. /* set write data */
  1292. ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
  1293. if (ret) {
  1294. ath10k_warn(ar,
  1295. "failed to write 0x%p to mbox window data address: %d\n",
  1296. data, ret);
  1297. return ret;
  1298. }
  1299. /* set window register, which starts the write cycle */
  1300. ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
  1301. if (ret) {
  1302. ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
  1303. return ret;
  1304. }
  1305. return 0;
  1306. }
  1307. /* HIF start/stop */
  1308. static int ath10k_sdio_hif_start(struct ath10k *ar)
  1309. {
  1310. struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
  1311. u32 addr, val;
  1312. int ret;
  1313. /* Sleep 20 ms before HIF interrupts are disabled.
  1314. * This will give the target plenty of time to process the BMI done
  1315. * request before interrupts are disabled.
  1316. */
  1317. msleep(20);
  1318. ret = ath10k_sdio_hif_disable_intrs(ar);
  1319. if (ret)
  1320. return ret;
  1321. /* eid 0 always uses the lower part of the extended mailbox address
  1322. * space (ext_info[0].htc_ext_addr).
  1323. */
  1324. ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
  1325. ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
  1326. sdio_claim_host(ar_sdio->func);
  1327. /* Register the isr */
  1328. ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
  1329. if (ret) {
  1330. ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
  1331. sdio_release_host(ar_sdio->func);
  1332. return ret;
  1333. }
  1334. sdio_release_host(ar_sdio->func);
  1335. ret = ath10k_sdio_hif_enable_intrs(ar);
  1336. if (ret)
  1337. ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
  1338. addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
  1339. ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
  1340. if (ret) {
  1341. ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
  1342. return ret;
  1343. }
  1344. if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
  1345. ath10k_dbg(ar, ATH10K_DBG_SDIO,
  1346. "sdio mailbox swap service enabled\n");
  1347. ar_sdio->swap_mbox = true;
  1348. }
  1349. /* Enable sleep and then disable it again */
  1350. ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
  1351. if (ret)
  1352. return ret;
  1353. /* Wait for 20ms for the written value to take effect */
  1354. msleep(20);
  1355. ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
  1356. if (ret)
  1357. return ret;
  1358. return 0;
  1359. }
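
/* Disable target interrupts by posting a zeroed copy of the interrupt enable
 * registers as an asynchronous write and, once it has completed (or timed
 * out), releasing the SDIO irq handler claimed in ath10k_sdio_hif_start().
 */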
#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)

static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irqs anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	kfree_skb(skb);
}
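
/* Stop the HIF: disable target interrupts, flush the async write worker and
 * complete or free any bus requests still sitting on the async queue.
 */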
static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);
}
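
/* HIF suspend is not supported over SDIO; resume only needs to re-apply the
 * SDIO function settings if power was cut while the device was off.
 */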
#ifdef CONFIG_PM
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	return -EOPNOTSUPP;
}

static int ath10k_sdio_hif_resume(struct ath10k *ar)
{
	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath10k_sdio_config(ar);
		break;

	case ATH10K_STATE_ON:
	default:
		break;
	}

	return 0;
}
#endif
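
/* On SDIO the pipe id is simply the HTC endpoint id. Besides the mapping,
 * this op also records the mailbox address and size to use for the WMI and
 * HTT services, honouring a firmware requested mailbox swap.
 */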
static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service...
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

/* This op is currently only used by htc_wait_target if the HTC ready
 * message times out. It is not applicable for SDIO since there is nothing
 * we can do if the HTC ready message does not arrive in time.
 * TODO: Make this op non-mandatory by introducing a NULL check in the
 * hif op wrapper.
 */
static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
						u8 pipe, int force)
{
}
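
/* HIF operations registered with the core via ath10k_core_create() in
 * ath10k_sdio_probe().
 */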
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.send_complete_check	= ath10k_sdio_hif_send_complete_check,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP

/* Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath10k_sdio_pm_suspend(struct device *device)
{
	return 0;
}

static int ath10k_sdio_pm_resume(struct device *device)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
			 ath10k_sdio_pm_resume);

#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)

#else

#define ATH10K_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
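
/* Allocate the core and per-bus state, set up the bus request free list and
 * async write worker, configure the SDIO function and finally register with
 * the ath10k core.
 */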
static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id, dev_id_base;
	int ret, i;

	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If newer chipsets appear that do not use the hw reg setup defined
	 * in qca6174_regs and qca6174_values, this assumption is no longer
	 * valid and hw_rev must be set up differently depending on the
	 * chipset.
	 */
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);

	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
	switch (dev_id_base) {
	case QCA_MANUFACTURER_ID_AR6005_BASE:
	case QCA_MANUFACTURER_ID_QCA9377_BASE:
		ar->dev_id = QCA9377_1_0_DEVICE_ID;
		break;
	default:
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	ret = ath10k_sdio_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
		goto err_free_wq;
	}

	/* TODO: don't know yet how to get chip_id with SDIO */
	chip_id = 0;
	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	/* TODO: remove this once SDIO support is fully implemented */
	ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	(void)ath10k_sdio_hif_disable_intrs(ar);
	cancel_work_sync(&ar_sdio->wr_async_work);
	ath10k_core_unregister(ar);
	ath10k_core_destroy(ar);
}
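
/* SDIO function ids matched by this driver (AR6005 and QCA9377 based
 * chipsets).
 */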
static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
		     (QCA_SDIO_ID_AR6005_BASE | 0xA))},
	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
		     (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv.pm = ATH10K_SDIO_PM_OPS,
};

static int __init ath10k_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath10k_sdio_driver);
	if (ret)
		pr_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");