pcie.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007
  1. /* Copyright (c) 2014 Broadcom Corporation
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/firmware.h>
  18. #include <linux/pci.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/delay.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/bcma/bcma.h>
  23. #include <linux/sched.h>
  24. #include <asm/unaligned.h>
  25. #include <soc.h>
  26. #include <chipcommon.h>
  27. #include <brcmu_utils.h>
  28. #include <brcmu_wifi.h>
  29. #include <brcm_hw_ids.h>
  30. #include "debug.h"
  31. #include "bus.h"
  32. #include "commonring.h"
  33. #include "msgbuf.h"
  34. #include "pcie.h"
  35. #include "firmware.h"
  36. #include "chip.h"
  37. #include "core.h"
  38. #include "common.h"
/* Bus state as tracked by the host driver; checked before forwarding
 * doorbell events and before re-enabling interrupts.
 */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
/* Firmware image and NVRAM file names, one pair per supported chip family. */
BRCMF_FW_NVRAM_DEF(43602, "brcmfmac43602-pcie.bin", "brcmfmac43602-pcie.txt");
BRCMF_FW_NVRAM_DEF(4350, "brcmfmac4350-pcie.bin", "brcmfmac4350-pcie.txt");
BRCMF_FW_NVRAM_DEF(4350C, "brcmfmac4350c2-pcie.bin", "brcmfmac4350c2-pcie.txt");
BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-pcie.bin", "brcmfmac4356-pcie.txt");
BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt");
BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt");
BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt");
BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt");
BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt");
/* Chip-id (plus revision mask) to firmware mapping table. Entries with the
 * same chip id but different revision masks select different images per
 * silicon revision.
 */
static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000

/* ARM CR4 bank select/clear registers (relative to selected core window) */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

/* PCI config space registers */
#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

#define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC

/* PCIE2 core registers (accessed through BAR0 after core select) */
#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX		0x140

/* Interrupt status/mask bits */
#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

/* Mailbox interrupt bits: FN0 = sideband mailbox, D2Hx_DBy = doorbells */
#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

/* Shared-structure (host/dongle) version and feature flag bits */
#define BRCMF_PCIE_MIN_SHARED_VERSION		5
#define BRCMF_PCIE_MAX_SHARED_VERSION		6
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000

/* Byte offsets into the shared structure in dongle TCM */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

/* Byte offsets into the ring info / ring mem structures */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

#define BRCMF_DEF_MAX_RXBUFPOST			255

/* Byte offsets into the firmware console descriptor */
#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

/* Dongle-to-host and host-to-dongle mailbox message bits */
#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004

#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
#define BRCMF_H2D_HOST_D0_INFORM		0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT		msecs_to_jiffies(2000)

/* Config-space registers saved/restored around watchdog reset */
#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3

/* Magic number at a magic location to find RAM size */
#define BRCMF_RAMSIZE_MAGIC			0x534d4152	/* SMAR */
#define BRCMF_RAMSIZE_OFFSET			0x6c
/**
 * struct brcmf_pcie_console - host-side view of the firmware console
 *
 * @base_addr: dongle address of the console descriptor in TCM.
 * @buf_addr: dongle address of the circular console log buffer.
 * @bufsize: size of the circular buffer in bytes.
 * @read_idx: host read position inside the circular buffer.
 * @log_str: assembly buffer used to build one printable line.
 * @log_idx: current write position in @log_str.
 */
struct brcmf_pcie_console {
	u32 base_addr;
	u32 buf_addr;
	u32 bufsize;
	u32 read_idx;
	u8 log_str[256];
	u8 log_idx;
};
/**
 * struct brcmf_pcie_shared_info - host copy of the dongle shared structure
 *
 * @tcm_base_address: dongle address of the shared structure in TCM.
 * @flags: feature flags read from the shared structure.
 * @commonrings: the fixed control/rxpost/completion rings.
 * @flowrings: dynamically created tx flow rings.
 * @max_rxbufpost: maximum rx buffers to post to the dongle.
 * @max_flowrings: maximum number of tx flow rings.
 * @max_submissionrings: maximum number of h2d rings.
 * @max_completionrings: maximum number of d2h rings.
 * @rx_dataoffset: data offset within received buffers.
 * @htod_mb_data_addr: dongle address of host-to-dongle mailbox word.
 * @dtoh_mb_data_addr: dongle address of dongle-to-host mailbox word.
 * @ring_info_addr: dongle address of the ring info structure.
 * @console: firmware console state.
 * @scratch: host DMA scratch buffer (and its handle).
 * @ringupd: host DMA ring update buffer (and its handle).
 * @version: shared structure protocol version.
 */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;
	u32 flags;
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;
	u32 dtoh_mb_data_addr;
	u32 ring_info_addr;
	struct brcmf_pcie_console console;
	void *scratch;
	dma_addr_t scratch_dmahandle;
	void *ringupd;
	dma_addr_t ringupd_dmahandle;
	u8 version;
};
/* Base and wrapper-base addresses of a backplane core. */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
/**
 * struct brcmf_pciedev_info - per-device state of the PCIe bus layer
 *
 * @state: bus up/down state.
 * @in_irq: set while the threaded irq handler runs.
 * @pdev: associated PCI device.
 * @fw_name: firmware image file name.
 * @nvram_name: NVRAM file name.
 * @regs: mapped BAR0 register window.
 * @tcm: mapped dongle tightly-coupled memory (BAR1).
 * @ram_base: dongle RAM base address.
 * @ram_size: dongle RAM size.
 * @ci: chip info handle.
 * @coreid: currently selected core id.
 * @shared: host copy of the dongle shared structure.
 * @mbdata_resp_wait: waitqueue for mailbox responses (e.g. D3 ack).
 * @mbdata_completed: set by irq thread when the awaited mailbox arrived.
 * @irq_allocated: irq was requested successfully.
 * @wowl_enabled: wake-on-wireless-lan is active.
 * @dma_idx_sz: size of DMA-based ring indices (0 when indices are in TCM).
 * @idxbuf: host buffer for DMA ring indices (with size and DMA handle).
 * @read_ptr: ring index read accessor (TCM or host DMA buffer variant).
 * @write_ptr: ring index write accessor (TCM or host DMA buffer variant).
 * @settings: per-device module parameter settings.
 */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_NAME_LEN];
	void __iomem *regs;
	void __iomem *tcm;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	struct brcmf_pcie_shared_info shared;
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
	u8 dma_idx_sz;
	void *idxbuf;
	u32 idxbuf_sz;
	dma_addr_t idxbuf_dmahandle;
	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  u16 value);
	struct brcmf_mp_device *settings;
};
/**
 * struct brcmf_pcie_ringbuf - one msgbuf ring plus its PCIe bookkeeping
 *
 * @commonring: generic commonring state (must stay first for container use).
 * @dma_handle: DMA address of the ring buffer.
 * @w_idx_addr: offset of the ring's write index (TCM or idxbuf).
 * @r_idx_addr: offset of the ring's read index (TCM or idxbuf).
 * @devinfo: owning device.
 * @id: ring identifier.
 */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;
	u32 r_idx_addr;
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};
/**
 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
 *
 * @ringmem: dongle memory pointer to ring memory location
 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
 * @max_flowrings: maximum number of tx flow rings supported.
 * @max_submissionrings: maximum number of submission rings(h2d) supported.
 * @max_completionrings: maximum number of completion rings(d2h) supported.
 */
struct brcmf_pcie_dhi_ringinfo {
	__le32 ringmem;
	__le32 h2d_w_idx_ptr;
	__le32 h2d_r_idx_ptr;
	__le32 d2h_w_idx_ptr;
	__le32 d2h_r_idx_ptr;
	struct msgbuf_buf_addr h2d_w_idx_hostaddr;
	struct msgbuf_buf_addr h2d_r_idx_hostaddr;
	struct msgbuf_buf_addr d2h_w_idx_hostaddr;
	struct msgbuf_buf_addr d2h_r_idx_hostaddr;
	__le16 max_flowrings;
	__le16 max_submissionrings;
	__le16 max_completionrings;
};
/* Maximum item count per common ring, indexed by common-ring id. */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};
/* Item size in bytes per common ring, indexed by common-ring id. */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
  284. static u32
  285. brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
  286. {
  287. void __iomem *address = devinfo->regs + reg_offset;
  288. return (ioread32(address));
  289. }
  290. static void
  291. brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
  292. u32 value)
  293. {
  294. void __iomem *address = devinfo->regs + reg_offset;
  295. iowrite32(value, address);
  296. }
  297. static u8
  298. brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  299. {
  300. void __iomem *address = devinfo->tcm + mem_offset;
  301. return (ioread8(address));
  302. }
  303. static u16
  304. brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  305. {
  306. void __iomem *address = devinfo->tcm + mem_offset;
  307. return (ioread16(address));
  308. }
  309. static void
  310. brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  311. u16 value)
  312. {
  313. void __iomem *address = devinfo->tcm + mem_offset;
  314. iowrite16(value, address);
  315. }
  316. static u16
  317. brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  318. {
  319. u16 *address = devinfo->idxbuf + mem_offset;
  320. return (*(address));
  321. }
  322. static void
  323. brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  324. u16 value)
  325. {
  326. u16 *address = devinfo->idxbuf + mem_offset;
  327. *(address) = value;
  328. }
  329. static u32
  330. brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  331. {
  332. void __iomem *address = devinfo->tcm + mem_offset;
  333. return (ioread32(address));
  334. }
  335. static void
  336. brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  337. u32 value)
  338. {
  339. void __iomem *address = devinfo->tcm + mem_offset;
  340. iowrite32(value, address);
  341. }
  342. static u32
  343. brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  344. {
  345. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  346. return (ioread32(addr));
  347. }
  348. static void
  349. brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  350. u32 value)
  351. {
  352. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  353. iowrite32(value, addr);
  354. }
  355. static void
  356. brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  357. void *srcaddr, u32 len)
  358. {
  359. void __iomem *address = devinfo->tcm + mem_offset;
  360. __le32 *src32;
  361. __le16 *src16;
  362. u8 *src8;
  363. if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
  364. if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
  365. src8 = (u8 *)srcaddr;
  366. while (len) {
  367. iowrite8(*src8, address);
  368. address++;
  369. src8++;
  370. len--;
  371. }
  372. } else {
  373. len = len / 2;
  374. src16 = (__le16 *)srcaddr;
  375. while (len) {
  376. iowrite16(le16_to_cpu(*src16), address);
  377. address += 2;
  378. src16++;
  379. len--;
  380. }
  381. }
  382. } else {
  383. len = len / 4;
  384. src32 = (__le32 *)srcaddr;
  385. while (len) {
  386. iowrite32(le32_to_cpu(*src32), address);
  387. address += 4;
  388. src32++;
  389. len--;
  390. }
  391. }
  392. }
  393. static void
  394. brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  395. void *dstaddr, u32 len)
  396. {
  397. void __iomem *address = devinfo->tcm + mem_offset;
  398. __le32 *dst32;
  399. __le16 *dst16;
  400. u8 *dst8;
  401. if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
  402. if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
  403. dst8 = (u8 *)dstaddr;
  404. while (len) {
  405. *dst8 = ioread8(address);
  406. address++;
  407. dst8++;
  408. len--;
  409. }
  410. } else {
  411. len = len / 2;
  412. dst16 = (__le16 *)dstaddr;
  413. while (len) {
  414. *dst16 = cpu_to_le16(ioread16(address));
  415. address += 2;
  416. dst16++;
  417. len--;
  418. }
  419. }
  420. } else {
  421. len = len / 4;
  422. dst32 = (__le32 *)dstaddr;
  423. while (len) {
  424. *dst32 = cpu_to_le32(ioread32(address));
  425. address += 4;
  426. dst32++;
  427. len--;
  428. }
  429. }
  430. }
/* Write a chipcommon register through the BAR0 register window; the
 * chipcommon core must have been selected first (see brcmf_pcie_select_core).
 */
#define WRITECC32(devinfo, reg, value)	brcmf_pcie_write_reg32(devinfo, \
							       CHIPCREGOFFS(reg), value)
  433. static void
  434. brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
  435. {
  436. const struct pci_dev *pdev = devinfo->pdev;
  437. struct brcmf_core *core;
  438. u32 bar0_win;
  439. core = brcmf_chip_get_core(devinfo->ci, coreid);
  440. if (core) {
  441. bar0_win = core->base;
  442. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
  443. if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
  444. &bar0_win) == 0) {
  445. if (bar0_win != core->base) {
  446. bar0_win = core->base;
  447. pci_write_config_dword(pdev,
  448. BRCMF_PCIE_BAR0_WINDOW,
  449. bar0_win);
  450. }
  451. }
  452. } else {
  453. brcmf_err("Unsupported core selected %x\n", coreid);
  454. }
  455. }
  456. static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
  457. {
  458. struct brcmf_core *core;
  459. u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
  460. BRCMF_PCIE_CFGREG_PM_CSR,
  461. BRCMF_PCIE_CFGREG_MSI_CAP,
  462. BRCMF_PCIE_CFGREG_MSI_ADDR_L,
  463. BRCMF_PCIE_CFGREG_MSI_ADDR_H,
  464. BRCMF_PCIE_CFGREG_MSI_DATA,
  465. BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
  466. BRCMF_PCIE_CFGREG_RBAR_CTRL,
  467. BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
  468. BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
  469. BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
  470. u32 i;
  471. u32 val;
  472. u32 lsc;
  473. if (!devinfo->ci)
  474. return;
  475. /* Disable ASPM */
  476. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  477. pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  478. &lsc);
  479. val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
  480. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  481. val);
  482. /* Watchdog reset */
  483. brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
  484. WRITECC32(devinfo, watchdog, 4);
  485. msleep(100);
  486. /* Restore ASPM */
  487. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  488. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  489. lsc);
  490. core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
  491. if (core->rev <= 13) {
  492. for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
  493. brcmf_pcie_write_reg32(devinfo,
  494. BRCMF_PCIE_PCIE2REG_CONFIGADDR,
  495. cfg_offset[i]);
  496. val = brcmf_pcie_read_reg32(devinfo,
  497. BRCMF_PCIE_PCIE2REG_CONFIGDATA);
  498. brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
  499. cfg_offset[i], val);
  500. brcmf_pcie_write_reg32(devinfo,
  501. BRCMF_PCIE_PCIE2REG_CONFIGDATA,
  502. val);
  503. }
  504. }
  505. }
  506. static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
  507. {
  508. u32 config;
  509. /* BAR1 window may not be sized properly */
  510. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  511. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
  512. config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
  513. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
  514. device_wakeup_enable(&devinfo->pdev->dev);
  515. }
  516. static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
  517. {
  518. if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
  519. brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
  520. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  521. 5);
  522. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  523. 0);
  524. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  525. 7);
  526. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  527. 0);
  528. }
  529. return 0;
  530. }
  531. static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
  532. u32 resetintr)
  533. {
  534. struct brcmf_core *core;
  535. if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
  536. core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
  537. brcmf_chip_resetcore(core, 0, 0, 0);
  538. }
  539. if (!brcmf_chip_set_active(devinfo->ci, resetintr))
  540. return -EINVAL;
  541. return 0;
  542. }
/*
 * brcmf_pcie_send_mb_data() - post a host-to-dongle mailbox value.
 *
 * Waits (polling every 10 ms, up to ~1 s) for any previously posted value
 * to be consumed, writes the new value into TCM and rings the sideband
 * mailbox. Returns 0 on success, -EIO if the pending value never clears.
 */
static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	addr = shared->htod_mb_data_addr;
	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (cur_htod_mb_data != 0)
		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
			  cur_htod_mb_data);

	i = 0;
	while (cur_htod_mb_data != 0) {
		msleep(10);
		i++;
		if (i > 100)
			return -EIO;
		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
	}

	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
	/* NOTE(review): the doorbell is written twice deliberately in the
	 * original code, presumably a hardware workaround -- do not
	 * deduplicate. TODO: confirm against vendor documentation.
	 */
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	return 0;
}
  569. static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
  570. {
  571. struct brcmf_pcie_shared_info *shared;
  572. u32 addr;
  573. u32 dtoh_mb_data;
  574. shared = &devinfo->shared;
  575. addr = shared->dtoh_mb_data_addr;
  576. dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
  577. if (!dtoh_mb_data)
  578. return;
  579. brcmf_pcie_write_tcm32(devinfo, addr, 0);
  580. brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
  581. if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
  582. brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
  583. brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
  584. brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
  585. }
  586. if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
  587. brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
  588. if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
  589. brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
  590. devinfo->mbdata_completed = true;
  591. wake_up(&devinfo->mbdata_resp_wait);
  592. }
  593. }
  594. static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
  595. {
  596. struct brcmf_pcie_shared_info *shared;
  597. struct brcmf_pcie_console *console;
  598. u32 addr;
  599. shared = &devinfo->shared;
  600. console = &shared->console;
  601. addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
  602. console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  603. addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
  604. console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  605. addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
  606. console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
  607. brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
  608. console->base_addr, console->buf_addr, console->bufsize);
  609. }
/*
 * brcmf_pcie_bus_console_read() - drain new firmware console output.
 *
 * Copies characters between the host read index and the firmware write
 * index out of the circular console buffer, assembling them into lines
 * and emitting one pr_debug() per newline. '\r' is dropped; over-long
 * lines are force-terminated before the assembly buffer overflows.
 * No-op unless firmware console logging is compiled/enabled.
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	if (!BRCMF_FWCON_ON())
		return;

	console = &devinfo->shared.console;
	/* current firmware write position in the circular buffer */
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* force a newline when the line buffer is nearly full,
		 * leaving room for the terminating NUL below
		 */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			pr_debug("CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}
  644. static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
  645. {
  646. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
  647. }
  648. static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
  649. {
  650. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  651. BRCMF_PCIE_MB_INT_D2H_DB |
  652. BRCMF_PCIE_MB_INT_FN0_0 |
  653. BRCMF_PCIE_MB_INT_FN0_1);
  654. }
  655. static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
  656. {
  657. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  658. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
  659. brcmf_pcie_intr_disable(devinfo);
  660. brcmf_dbg(PCIE, "Enter\n");
  661. return IRQ_WAKE_THREAD;
  662. }
  663. return IRQ_NONE;
  664. }
/* Threaded half of the PCIe interrupt handler. Reads the mailbox
 * interrupt status, acknowledges it by writing the observed bits back,
 * dispatches device-to-host mailbox events (FN0 bits) and
 * doorbell-triggered msgbuf RX, then drains the firmware console and
 * re-enables interrupts (masked by the quick-check ISR) while the bus
 * is still up. in_irq is a plain flag polled by brcmf_pcie_release_irq
 * to wait for a running handler to finish.
 */
static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* ack exactly the bits just observed */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			/* only kick RX processing while the bus is up */
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
/* Enable MSI (best effort) and install the two-stage interrupt handler
 * (quick check in hard-IRQ context, main work in a thread). Device
 * interrupts are masked first so nothing fires before setup completes.
 * On request failure MSI is rolled back and -EIO is returned; on
 * success irq_allocated records that release_irq has work to do.
 */
static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);

	brcmf_dbg(PCIE, "Enter\n");

	/* NOTE(review): pci_enable_msi() result is ignored here —
	 * presumably the legacy interrupt line is used when MSI cannot
	 * be enabled; confirm before "fixing".
	 */
	pci_enable_msi(pdev);
	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
				 brcmf_pcie_isr_thread, IRQF_SHARED,
				 "brcmf_pcie_intr", devinfo)) {
		pci_disable_msi(pdev);
		brcmf_err("Failed to request IRQ %d\n", pdev->irq);
		return -EIO;
	}
	devinfo->irq_allocated = true;
	return 0;
}
/* Tear down the interrupt path set up by brcmf_pcie_request_irq: mask
 * device interrupts, free the IRQ and disable MSI, then poll (50 ms
 * steps, ~1 s max) for a still-running threaded handler to clear
 * in_irq, and finally acknowledge any mailbox status left latched.
 * No-op when the IRQ was never allocated.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	/* busy-ish wait for the threaded handler to drop in_irq */
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* ack whatever interrupt status is still pending */
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);

	devinfo->irq_allocated = false;
}
  730. static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
  731. {
  732. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  733. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  734. struct brcmf_commonring *commonring = &ring->commonring;
  735. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  736. return -EIO;
  737. brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  738. commonring->w_ptr, ring->id);
  739. devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
  740. return 0;
  741. }
  742. static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
  743. {
  744. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  745. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  746. struct brcmf_commonring *commonring = &ring->commonring;
  747. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  748. return -EIO;
  749. brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  750. commonring->r_ptr, ring->id);
  751. devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
  752. return 0;
  753. }
  754. static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
  755. {
  756. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  757. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  758. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  759. return -EIO;
  760. brcmf_dbg(PCIE, "RING !\n");
  761. /* Any arbitrary value will do, lets use 1 */
  762. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
  763. return 0;
  764. }
  765. static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
  766. {
  767. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  768. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  769. struct brcmf_commonring *commonring = &ring->commonring;
  770. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  771. return -EIO;
  772. commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
  773. brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  774. commonring->w_ptr, ring->id);
  775. return 0;
  776. }
  777. static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
  778. {
  779. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  780. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  781. struct brcmf_commonring *commonring = &ring->commonring;
  782. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  783. return -EIO;
  784. commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
  785. brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  786. commonring->r_ptr, ring->id);
  787. return 0;
  788. }
  789. static void *
  790. brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
  791. u32 size, u32 tcm_dma_phys_addr,
  792. dma_addr_t *dma_handle)
  793. {
  794. void *ring;
  795. u64 address;
  796. ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
  797. GFP_KERNEL);
  798. if (!ring)
  799. return NULL;
  800. address = (u64)*dma_handle;
  801. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
  802. address & 0xffffffff);
  803. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
  804. memset(ring, 0, size);
  805. return (ring);
  806. }
  807. static struct brcmf_pcie_ringbuf *
  808. brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
  809. u32 tcm_ring_phys_addr)
  810. {
  811. void *dma_buf;
  812. dma_addr_t dma_handle;
  813. struct brcmf_pcie_ringbuf *ring;
  814. u32 size;
  815. u32 addr;
  816. size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
  817. dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
  818. tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
  819. &dma_handle);
  820. if (!dma_buf)
  821. return NULL;
  822. addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
  823. brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
  824. addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
  825. brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
  826. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  827. if (!ring) {
  828. dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
  829. dma_handle);
  830. return NULL;
  831. }
  832. brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
  833. brcmf_ring_itemsize[ring_id], dma_buf);
  834. ring->dma_handle = dma_handle;
  835. ring->devinfo = devinfo;
  836. brcmf_commonring_register_cb(&ring->commonring,
  837. brcmf_pcie_ring_mb_ring_bell,
  838. brcmf_pcie_ring_mb_update_rptr,
  839. brcmf_pcie_ring_mb_update_wptr,
  840. brcmf_pcie_ring_mb_write_rptr,
  841. brcmf_pcie_ring_mb_write_wptr, ring);
  842. return (ring);
  843. }
  844. static void brcmf_pcie_release_ringbuffer(struct device *dev,
  845. struct brcmf_pcie_ringbuf *ring)
  846. {
  847. void *dma_buf;
  848. u32 size;
  849. if (!ring)
  850. return;
  851. dma_buf = ring->commonring.buf_addr;
  852. if (dma_buf) {
  853. size = ring->commonring.depth * ring->commonring.item_len;
  854. dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
  855. }
  856. kfree(ring);
  857. }
  858. static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
  859. {
  860. u32 i;
  861. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
  862. brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
  863. devinfo->shared.commonrings[i]);
  864. devinfo->shared.commonrings[i] = NULL;
  865. }
  866. kfree(devinfo->shared.flowrings);
  867. devinfo->shared.flowrings = NULL;
  868. if (devinfo->idxbuf) {
  869. dma_free_coherent(&devinfo->pdev->dev,
  870. devinfo->idxbuf_sz,
  871. devinfo->idxbuf,
  872. devinfo->idxbuf_dmahandle);
  873. devinfo->idxbuf = NULL;
  874. }
  875. }
/* Set up all message rings shared with the firmware. Reads the ring
 * info block from TCM, determines ring counts (layout differs before
 * shared-area version 6), chooses between TCM-resident and host-memory
 * (DMA) read/write indices, allocates the common rings and the flowring
 * bookkeeping array, and records everything in devinfo->shared.
 * Returns 0 on success or -ENOMEM after releasing partial allocations.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 ring_mem_ptr;
	u32 i;
	u64 address;
	u32 bufsz;
	u8 idx_offset;
	struct brcmf_pcie_dhi_ringinfo ringinfo;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
		      sizeof(ringinfo));
	if (devinfo->shared.version >= 6) {
		max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
		max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
		max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
	} else {
		/* pre-v6 firmware only reports a single count; flowrings
		 * are what remains after the common H2D rings, and the
		 * completion ring count is fixed.
		 */
		max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
		max_flowrings = max_submissionrings -
				BRCMF_NROF_H2D_COMMON_MSGRINGS;
		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
	}

	if (devinfo->dma_idx_sz != 0) {
		/* one r-index and one w-index per ring, hence "* 2" */
		bufsz = (max_submissionrings + max_completionrings) *
			devinfo->dma_idx_sz * 2;
		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
						     &devinfo->idxbuf_dmahandle,
						     GFP_KERNEL);
		if (!devinfo->idxbuf)
			devinfo->dma_idx_sz = 0;	/* fall back to TCM */
	}

	if (devinfo->dma_idx_sz == 0) {
		/* indices live in device TCM at firmware-provided offsets */
		d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(u32);
		devinfo->write_ptr = brcmf_pcie_write_tcm16;
		devinfo->read_ptr = brcmf_pcie_read_tcm16;
		brcmf_dbg(PCIE, "Using TCM indices\n");
	} else {
		/* indices live in host memory; carve the idxbuf into four
		 * consecutive regions (h2d_w, h2d_r, d2h_w, d2h_r) and
		 * tell the firmware each region's bus address.
		 */
		memset(devinfo->idxbuf, 0, bufsz);
		devinfo->idxbuf_sz = bufsz;
		idx_offset = devinfo->dma_idx_sz;
		devinfo->write_ptr = brcmf_pcie_write_idx;
		devinfo->read_ptr = brcmf_pcie_read_idx;

		h2d_w_idx_ptr = 0;
		address = (u64)devinfo->idxbuf_dmahandle;
		ringinfo.h2d_w_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
				max_submissionrings * idx_offset;
		address += max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
				max_submissionrings * idx_offset;
		address += max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
				max_completionrings * idx_offset;
		address += max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		/* write the updated ring info (with host addresses) back */
		memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
			    &ringinfo, sizeof(ringinfo));
		brcmf_dbg(PCIE, "Using host memory indices\n");
	}

	/* common H2D rings first, then common D2H rings */
	ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += idx_offset;
		d2h_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	devinfo->shared.max_flowrings = max_flowrings;
	devinfo->shared.max_submissionrings = max_submissionrings;
	devinfo->shared.max_completionrings = max_completionrings;
	/* flowring DMA buffers are allocated later; only bookkeeping here */
	rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);

	for (i = 0; i < max_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		/* flowring indices follow the common H2D indices */
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating ring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
  1016. static void
  1017. brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  1018. {
  1019. if (devinfo->shared.scratch)
  1020. dma_free_coherent(&devinfo->pdev->dev,
  1021. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  1022. devinfo->shared.scratch,
  1023. devinfo->shared.scratch_dmahandle);
  1024. if (devinfo->shared.ringupd)
  1025. dma_free_coherent(&devinfo->pdev->dev,
  1026. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  1027. devinfo->shared.ringupd,
  1028. devinfo->shared.ringupd_dmahandle);
  1029. }
  1030. static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  1031. {
  1032. u64 address;
  1033. u32 addr;
  1034. devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
  1035. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  1036. &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
  1037. if (!devinfo->shared.scratch)
  1038. goto fail;
  1039. memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  1040. addr = devinfo->shared.tcm_base_address +
  1041. BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
  1042. address = (u64)devinfo->shared.scratch_dmahandle;
  1043. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  1044. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  1045. addr = devinfo->shared.tcm_base_address +
  1046. BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
  1047. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  1048. devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
  1049. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  1050. &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
  1051. if (!devinfo->shared.ringupd)
  1052. goto fail;
  1053. memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  1054. addr = devinfo->shared.tcm_base_address +
  1055. BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
  1056. address = (u64)devinfo->shared.ringupd_dmahandle;
  1057. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  1058. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  1059. addr = devinfo->shared.tcm_base_address +
  1060. BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
  1061. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  1062. return 0;
  1063. fail:
  1064. brcmf_err("Allocating scratch buffers failed\n");
  1065. brcmf_pcie_release_scratchbuffers(devinfo);
  1066. return -ENOMEM;
  1067. }
/* Bus "stop" callback: intentionally empty for PCIe. */
static void brcmf_pcie_down(struct device *dev)
{
}
/* Bus "txdata" callback: stub that reports success without queuing
 * anything.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
/* Bus "txctl" callback: stub that reports success without sending the
 * control message.
 */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
/* Bus "rxctl" callback: stub that reports success without receiving
 * anything.
 */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
  1085. static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
  1086. {
  1087. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1088. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1089. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1090. brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
  1091. devinfo->wowl_enabled = enabled;
  1092. }
  1093. static size_t brcmf_pcie_get_ramsize(struct device *dev)
  1094. {
  1095. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1096. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1097. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1098. return devinfo->ci->ramsize - devinfo->ci->srsize;
  1099. }
  1100. static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
  1101. {
  1102. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1103. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1104. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1105. brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
  1106. brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
  1107. return 0;
  1108. }
/* Bus operations exposed to the brcmf core for the PCIe back-end.
 * txdata/txctl/rxctl/stop map to the stub implementations above.
 */
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
	.get_ramsize = brcmf_pcie_get_ramsize,
	.get_memdump = brcmf_pcie_get_memdump,
};
  1118. static void
  1119. brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
  1120. u32 data_len)
  1121. {
  1122. __le32 *field;
  1123. u32 newsize;
  1124. if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
  1125. return;
  1126. field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
  1127. if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
  1128. return;
  1129. field++;
  1130. newsize = le32_to_cpup(field);
  1131. brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
  1132. newsize);
  1133. devinfo->ci->ramsize = newsize;
  1134. }
/* Parse the shared-memory area the firmware placed at @sharedram_addr:
 * validate the protocol version, detect DMA-index support (2- or
 * 4-byte indices), and cache the mailbox/ring-info/rx-parameter
 * locations in devinfo->shared. Also initializes the firmware console.
 * Returns 0 on success, -EINVAL for an unsupported protocol version.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	/* first word of the shared area holds flags + version */
	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", shared->version);
		return -EINVAL;
	}

	/* check whether firmware supports DMA indices, and their width */
	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
			devinfo->dma_idx_sz = sizeof(u16);
		else
			devinfo->dma_idx_sz = sizeof(u32);
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
  1175. static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
  1176. const struct firmware *fw, void *nvram,
  1177. u32 nvram_len)
  1178. {
  1179. u32 sharedram_addr;
  1180. u32 sharedram_addr_written;
  1181. u32 loop_counter;
  1182. int err;
  1183. u32 address;
  1184. u32 resetintr;
  1185. brcmf_dbg(PCIE, "Halt ARM.\n");
  1186. err = brcmf_pcie_enter_download_state(devinfo);
  1187. if (err)
  1188. return err;
  1189. brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
  1190. brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
  1191. (void *)fw->data, fw->size);
  1192. resetintr = get_unaligned_le32(fw->data);
  1193. release_firmware(fw);
  1194. /* reset last 4 bytes of RAM address. to be used for shared
  1195. * area. This identifies when FW is running
  1196. */
  1197. brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
  1198. if (nvram) {
  1199. brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
  1200. address = devinfo->ci->rambase + devinfo->ci->ramsize -
  1201. nvram_len;
  1202. brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
  1203. brcmf_fw_nvram_free(nvram);
  1204. } else {
  1205. brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
  1206. devinfo->nvram_name);
  1207. }
  1208. sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
  1209. devinfo->ci->ramsize -
  1210. 4);
  1211. brcmf_dbg(PCIE, "Bring ARM in running state\n");
  1212. err = brcmf_pcie_exit_download_state(devinfo, resetintr);
  1213. if (err)
  1214. return err;
  1215. brcmf_dbg(PCIE, "Wait for FW init\n");
  1216. sharedram_addr = sharedram_addr_written;
  1217. loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
  1218. while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
  1219. msleep(50);
  1220. sharedram_addr = brcmf_pcie_read_ram32(devinfo,
  1221. devinfo->ci->ramsize -
  1222. 4);
  1223. loop_counter--;
  1224. }
  1225. if (sharedram_addr == sharedram_addr_written) {
  1226. brcmf_err("FW failed to initialize\n");
  1227. return -ENODEV;
  1228. }
  1229. brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
  1230. return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
  1231. }
/* Enable the PCI device, set bus mastering, and map the two BARs:
 * BAR0 (register window) and BAR2 (device TCM). The mappings are
 * stored in devinfo->regs / devinfo->tcm. On partial ioremap failure
 * the successful mapping is not undone here — brcmf_pcie_release_resource
 * (called on the probe failure path) unmaps whatever is non-NULL.
 * Returns 0 on success, negative errno otherwise.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
		  devinfo->tcm, (unsigned long long)bar1_addr,
		  (unsigned int)bar1_size);

	return 0;
}
/* Undo brcmf_pcie_get_resource: unmap whichever BAR mappings were
 * established and disable the PCI device.
 */
static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->tcm)
		iounmap(devinfo->tcm);
	if (devinfo->regs)
		iounmap(devinfo->regs);

	pci_disable_device(devinfo->pdev);
}
  1278. static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo)
  1279. {
  1280. int ret;
  1281. /* Attach to the common driver interface */
  1282. ret = brcmf_attach(&devinfo->pdev->dev, devinfo->settings);
  1283. if (ret) {
  1284. brcmf_err("brcmf_attach failed\n");
  1285. } else {
  1286. ret = brcmf_bus_started(&devinfo->pdev->dev);
  1287. if (ret)
  1288. brcmf_err("dongle is not responding\n");
  1289. }
  1290. return ret;
  1291. }
  1292. static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
  1293. {
  1294. u32 ret_addr;
  1295. ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1296. addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1297. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
  1298. return ret_addr;
  1299. }
  1300. static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
  1301. {
  1302. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1303. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1304. return brcmf_pcie_read_reg32(devinfo, addr);
  1305. }
  1306. static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
  1307. {
  1308. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1309. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1310. brcmf_pcie_write_reg32(devinfo, addr, value);
  1311. }
/* Buscore "prepare" callback: enables the PCI device and maps its BARs. */
static int brcmf_pcie_buscoreprep(void *ctx)
{
	return brcmf_pcie_get_resource(ctx);
}
  1316. static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
  1317. {
  1318. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1319. u32 val;
  1320. devinfo->ci = chip;
  1321. brcmf_pcie_reset_device(devinfo);
  1322. val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
  1323. if (val != 0xffffffff)
  1324. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
  1325. val);
  1326. return 0;
  1327. }
/* Buscore "activate" callback: place the reset vector at TCM offset 0. */
static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
}
/* Chip/buscore access callbacks handed to brcmf_chip_attach(). */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
/* Firmware-request completion callback: receives the loaded firmware
 * and NVRAM, downloads them, brings up rings, scratch buffers and
 * interrupts, hooks the rings into the msgbuf layer and attaches the
 * bus. Any failure releases the driver binding; fw/nvram ownership
 * passes to brcmf_pcie_download_fw_nvram.
 */
static void brcmf_pcie_setup(struct device *dev, int ret,
			     const struct firmware *fw,
			     void *nvram, u32 nvram_len)
{
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	/* Some of the firmwares have the size of the memory of the device
	 * defined inside the firmware. This is because part of the memory in
	 * the device is shared and the devision is determined by FW. Parse
	 * the firmware and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.max_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	brcmf_pcie_intr_enable(devinfo);
	if (brcmf_pcie_attach_bus(devinfo) == 0)
		return;

	/* dump any firmware console output before tearing down */
	brcmf_pcie_bus_console_read(devinfo);

fail:
	device_release_driver(dev);
}
/* PCI probe entry point: allocate the per-device state, attach the chip
 * via the buscore ops (which enables the device and maps BARs), build
 * the bus/msgbuf structures, and kick off the asynchronous firmware
 * request — brcmf_pcie_setup completes initialization once firmware
 * arrives. On failure every allocation made so far is rolled back.
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;
	u16 domain_nr;
	u16 bus_nr;

	domain_nr = pci_domain_nr(pdev->bus) + 1;
	bus_nr = pdev->bus->number;
	brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
		  domain_nr, bus_nr);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	/* chip attach also runs the buscore prepare/reset callbacks */
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
						   BRCMF_BUSTYPE_PCIE,
						   devinfo->ci->chip,
						   devinfo->ci->chiprev);
	if (!devinfo->settings) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_fw_map_chip_to_name(devinfo->ci->chip, devinfo->ci->chiprev,
					brcmf_pcie_fwnames,
					ARRAY_SIZE(brcmf_pcie_fwnames),
					devinfo->fw_name, devinfo->nvram_name);
	if (ret)
		goto fail_bus;

	/* async firmware load; brcmf_pcie_setup runs on completion */
	ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
						    BRCMF_FW_REQ_NV_OPTIONAL,
					  devinfo->fw_name, devinfo->nvram_name,
					  brcmf_pcie_setup, domain_nr, bus_nr);
	if (ret == 0)
		return 0;
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
/*
 * PCI remove callback: detach the driver core, free bus/msgbuf state
 * and release chip, ring, IRQ and BAR resources.  Also called from the
 * resume path (brcmf_pcie_pm_leave_D3) to force a full re-probe, so it
 * must tolerate being invoked on a device that is already down.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;
	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	/* Only touch interrupt registers while a chip is still attached. */
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	/* Release hardware resources in reverse order of acquisition:
	 * IRQ first, then DMA buffers/rings, then reset and unmap.
	 */
	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	/* Clear drvdata so a re-entrant remove becomes a no-op. */
	dev_set_drvdata(&pdev->dev, NULL);
}
  1514. #ifdef CONFIG_PM
  1515. static int brcmf_pcie_pm_enter_D3(struct device *dev)
  1516. {
  1517. struct brcmf_pciedev_info *devinfo;
  1518. struct brcmf_bus *bus;
  1519. brcmf_dbg(PCIE, "Enter\n");
  1520. bus = dev_get_drvdata(dev);
  1521. devinfo = bus->bus_priv.pcie->devinfo;
  1522. brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
  1523. devinfo->mbdata_completed = false;
  1524. brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
  1525. wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
  1526. BRCMF_PCIE_MBDATA_TIMEOUT);
  1527. if (!devinfo->mbdata_completed) {
  1528. brcmf_err("Timeout on response for entering D3 substate\n");
  1529. brcmf_bus_change_state(bus, BRCMF_BUS_UP);
  1530. return -EIO;
  1531. }
  1532. devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
  1533. return 0;
  1534. }
  1535. static int brcmf_pcie_pm_leave_D3(struct device *dev)
  1536. {
  1537. struct brcmf_pciedev_info *devinfo;
  1538. struct brcmf_bus *bus;
  1539. struct pci_dev *pdev;
  1540. int err;
  1541. brcmf_dbg(PCIE, "Enter\n");
  1542. bus = dev_get_drvdata(dev);
  1543. devinfo = bus->bus_priv.pcie->devinfo;
  1544. brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
  1545. /* Check if device is still up and running, if so we are ready */
  1546. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
  1547. brcmf_dbg(PCIE, "Try to wakeup device....\n");
  1548. if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
  1549. goto cleanup;
  1550. brcmf_dbg(PCIE, "Hot resume, continue....\n");
  1551. devinfo->state = BRCMFMAC_PCIE_STATE_UP;
  1552. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  1553. brcmf_bus_change_state(bus, BRCMF_BUS_UP);
  1554. brcmf_pcie_intr_enable(devinfo);
  1555. return 0;
  1556. }
  1557. cleanup:
  1558. brcmf_chip_detach(devinfo->ci);
  1559. devinfo->ci = NULL;
  1560. pdev = devinfo->pdev;
  1561. brcmf_pcie_remove(pdev);
  1562. err = brcmf_pcie_probe(pdev, NULL);
  1563. if (err)
  1564. brcmf_err("probe after resume failed, err=%d\n", err);
  1565. return err;
  1566. }
/* System suspend/resume and hibernation (freeze/restore) share the
 * same D3-enter / D3-leave handlers.
 */
static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};

#endif /* CONFIG_PM */
/* Match any Broadcom device with the given device id, restricted to
 * the "network controller / other" PCI class.
 */
#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
/* Same, but additionally constrained to a specific subsystem vendor/device. */
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }

/* PCI ids handled by this driver. */
static const struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
	/* 4365 variant matched by subsystem id as well. */
	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
	{ /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
/* PCI driver definition; PM ops are only wired up when CONFIG_PM is set. */
static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.driver.pm = &brcmf_pciedrvr_pm,
#endif
};
  1611. void brcmf_pcie_register(void)
  1612. {
  1613. int err;
  1614. brcmf_dbg(PCIE, "Enter\n");
  1615. err = pci_register_driver(&brcmf_pciedrvr);
  1616. if (err)
  1617. brcmf_err("PCIE driver registration failed, err=%d\n", err);
  1618. }
/* Unregister the PCIe driver; remove() runs for every bound device. */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}