pcie.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967
  1. /* Copyright (c) 2014 Broadcom Corporation
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/firmware.h>
  18. #include <linux/pci.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/delay.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/bcma/bcma.h>
  23. #include <linux/sched.h>
  24. #include <asm/unaligned.h>
  25. #include <soc.h>
  26. #include <chipcommon.h>
  27. #include <brcmu_utils.h>
  28. #include <brcmu_wifi.h>
  29. #include <brcm_hw_ids.h>
  30. #include "debug.h"
  31. #include "bus.h"
  32. #include "commonring.h"
  33. #include "msgbuf.h"
  34. #include "pcie.h"
  35. #include "firmware.h"
  36. #include "chip.h"
  37. #include "core.h"
  38. #include "common.h"
  39. enum brcmf_pcie_state {
  40. BRCMFMAC_PCIE_STATE_DOWN,
  41. BRCMFMAC_PCIE_STATE_UP
  42. };
  43. BRCMF_FW_NVRAM_DEF(43602, "brcmfmac43602-pcie.bin", "brcmfmac43602-pcie.txt");
  44. BRCMF_FW_NVRAM_DEF(4350, "brcmfmac4350-pcie.bin", "brcmfmac4350-pcie.txt");
  45. BRCMF_FW_NVRAM_DEF(4350C, "brcmfmac4350c2-pcie.bin", "brcmfmac4350c2-pcie.txt");
  46. BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-pcie.bin", "brcmfmac4356-pcie.txt");
  47. BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt");
  48. BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt");
  49. BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt");
  50. BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt");
  51. BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt");
  52. BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt");
  53. BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt");
  54. BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt");
  55. static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
  56. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
  57. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
  58. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
  59. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
  60. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
  61. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
  62. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
  63. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
  64. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
  65. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
  66. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
  67. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
  68. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
  69. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
  70. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
  71. BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
  72. };
  73. #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
  74. #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
  75. /* backplane addres space accessed by BAR0 */
  76. #define BRCMF_PCIE_BAR0_WINDOW 0x80
  77. #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
  78. #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
  79. #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
  80. #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
  81. #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
  82. #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
  83. #define BRCMF_PCIE_REG_INTSTATUS 0x90
  84. #define BRCMF_PCIE_REG_INTMASK 0x94
  85. #define BRCMF_PCIE_REG_SBMBX 0x98
  86. #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
  87. #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
  88. #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
  89. #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
  90. #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
  91. #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
  92. #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
  93. #define BRCMF_PCIE2_INTA 0x01
  94. #define BRCMF_PCIE2_INTB 0x02
  95. #define BRCMF_PCIE_INT_0 0x01
  96. #define BRCMF_PCIE_INT_1 0x02
  97. #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
  98. BRCMF_PCIE_INT_1)
  99. #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
  100. #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
  101. #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
  102. #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
  103. #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
  104. #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
  105. #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
  106. #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
  107. #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
  108. #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
  109. #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
  110. BRCMF_PCIE_MB_INT_D2H0_DB1 | \
  111. BRCMF_PCIE_MB_INT_D2H1_DB0 | \
  112. BRCMF_PCIE_MB_INT_D2H1_DB1 | \
  113. BRCMF_PCIE_MB_INT_D2H2_DB0 | \
  114. BRCMF_PCIE_MB_INT_D2H2_DB1 | \
  115. BRCMF_PCIE_MB_INT_D2H3_DB0 | \
  116. BRCMF_PCIE_MB_INT_D2H3_DB1)
  117. #define BRCMF_PCIE_MIN_SHARED_VERSION 5
  118. #define BRCMF_PCIE_MAX_SHARED_VERSION 5
  119. #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
  120. #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
  121. #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
  122. #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
  123. #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
  124. #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
  125. #define BRCMF_SHARED_RING_BASE_OFFSET 52
  126. #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
  127. #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
  128. #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
  129. #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
  130. #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
  131. #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
  132. #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
  133. #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
  134. #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
  135. #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
  136. #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
  137. #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
  138. #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
  139. #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
  140. #define BRCMF_RING_MAX_ITEM_OFFSET 4
  141. #define BRCMF_RING_LEN_ITEMS_OFFSET 6
  142. #define BRCMF_RING_MEM_SZ 16
  143. #define BRCMF_RING_STATE_SZ 8
  144. #define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
  145. #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
  146. #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
  147. #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
  148. #define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
  149. #define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
  150. #define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
  151. #define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
  152. #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
  153. #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
  154. #define BRCMF_DEF_MAX_RXBUFPOST 255
  155. #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
  156. #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
  157. #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
  158. #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
  159. #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
  160. #define BRCMF_D2H_DEV_D3_ACK 0x00000001
  161. #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
  162. #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
  163. #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
  164. #define BRCMF_H2D_HOST_DS_ACK 0x00000002
  165. #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
  166. #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
  167. #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
  168. #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
  169. #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
  170. #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
  171. #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
  172. #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
  173. #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
  174. #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
  175. #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
  176. #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
  177. #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
  178. #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
  179. #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
  180. #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
  181. /* Magic number at a magic location to find RAM size */
  182. #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
  183. #define BRCMF_RAMSIZE_OFFSET 0x6c
  184. struct brcmf_pcie_console {
  185. u32 base_addr;
  186. u32 buf_addr;
  187. u32 bufsize;
  188. u32 read_idx;
  189. u8 log_str[256];
  190. u8 log_idx;
  191. };
  192. struct brcmf_pcie_shared_info {
  193. u32 tcm_base_address;
  194. u32 flags;
  195. struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
  196. struct brcmf_pcie_ringbuf *flowrings;
  197. u16 max_rxbufpost;
  198. u32 nrof_flowrings;
  199. u32 rx_dataoffset;
  200. u32 htod_mb_data_addr;
  201. u32 dtoh_mb_data_addr;
  202. u32 ring_info_addr;
  203. struct brcmf_pcie_console console;
  204. void *scratch;
  205. dma_addr_t scratch_dmahandle;
  206. void *ringupd;
  207. dma_addr_t ringupd_dmahandle;
  208. };
  209. struct brcmf_pcie_core_info {
  210. u32 base;
  211. u32 wrapbase;
  212. };
  213. struct brcmf_pciedev_info {
  214. enum brcmf_pcie_state state;
  215. bool in_irq;
  216. struct pci_dev *pdev;
  217. char fw_name[BRCMF_FW_NAME_LEN];
  218. char nvram_name[BRCMF_FW_NAME_LEN];
  219. void __iomem *regs;
  220. void __iomem *tcm;
  221. u32 ram_base;
  222. u32 ram_size;
  223. struct brcmf_chip *ci;
  224. u32 coreid;
  225. struct brcmf_pcie_shared_info shared;
  226. wait_queue_head_t mbdata_resp_wait;
  227. bool mbdata_completed;
  228. bool irq_allocated;
  229. bool wowl_enabled;
  230. u8 dma_idx_sz;
  231. void *idxbuf;
  232. u32 idxbuf_sz;
  233. dma_addr_t idxbuf_dmahandle;
  234. u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
  235. void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  236. u16 value);
  237. struct brcmf_mp_device *settings;
  238. };
  239. struct brcmf_pcie_ringbuf {
  240. struct brcmf_commonring commonring;
  241. dma_addr_t dma_handle;
  242. u32 w_idx_addr;
  243. u32 r_idx_addr;
  244. struct brcmf_pciedev_info *devinfo;
  245. u8 id;
  246. };
  247. static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
  248. BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
  249. BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
  250. BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
  251. BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
  252. BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
  253. };
  254. static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
  255. BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
  256. BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
  257. BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
  258. BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
  259. BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
  260. };
  261. static u32
  262. brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
  263. {
  264. void __iomem *address = devinfo->regs + reg_offset;
  265. return (ioread32(address));
  266. }
  267. static void
  268. brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
  269. u32 value)
  270. {
  271. void __iomem *address = devinfo->regs + reg_offset;
  272. iowrite32(value, address);
  273. }
  274. static u8
  275. brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  276. {
  277. void __iomem *address = devinfo->tcm + mem_offset;
  278. return (ioread8(address));
  279. }
  280. static u16
  281. brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  282. {
  283. void __iomem *address = devinfo->tcm + mem_offset;
  284. return (ioread16(address));
  285. }
  286. static void
  287. brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  288. u16 value)
  289. {
  290. void __iomem *address = devinfo->tcm + mem_offset;
  291. iowrite16(value, address);
  292. }
  293. static u16
  294. brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  295. {
  296. u16 *address = devinfo->idxbuf + mem_offset;
  297. return (*(address));
  298. }
  299. static void
  300. brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  301. u16 value)
  302. {
  303. u16 *address = devinfo->idxbuf + mem_offset;
  304. *(address) = value;
  305. }
  306. static u32
  307. brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  308. {
  309. void __iomem *address = devinfo->tcm + mem_offset;
  310. return (ioread32(address));
  311. }
  312. static void
  313. brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  314. u32 value)
  315. {
  316. void __iomem *address = devinfo->tcm + mem_offset;
  317. iowrite32(value, address);
  318. }
  319. static u32
  320. brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  321. {
  322. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  323. return (ioread32(addr));
  324. }
  325. static void
  326. brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  327. u32 value)
  328. {
  329. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  330. iowrite32(value, addr);
  331. }
  332. static void
  333. brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  334. void *srcaddr, u32 len)
  335. {
  336. void __iomem *address = devinfo->tcm + mem_offset;
  337. __le32 *src32;
  338. __le16 *src16;
  339. u8 *src8;
  340. if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
  341. if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
  342. src8 = (u8 *)srcaddr;
  343. while (len) {
  344. iowrite8(*src8, address);
  345. address++;
  346. src8++;
  347. len--;
  348. }
  349. } else {
  350. len = len / 2;
  351. src16 = (__le16 *)srcaddr;
  352. while (len) {
  353. iowrite16(le16_to_cpu(*src16), address);
  354. address += 2;
  355. src16++;
  356. len--;
  357. }
  358. }
  359. } else {
  360. len = len / 4;
  361. src32 = (__le32 *)srcaddr;
  362. while (len) {
  363. iowrite32(le32_to_cpu(*src32), address);
  364. address += 4;
  365. src32++;
  366. len--;
  367. }
  368. }
  369. }
  370. static void
  371. brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  372. void *dstaddr, u32 len)
  373. {
  374. void __iomem *address = devinfo->tcm + mem_offset;
  375. __le32 *dst32;
  376. __le16 *dst16;
  377. u8 *dst8;
  378. if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
  379. if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
  380. dst8 = (u8 *)dstaddr;
  381. while (len) {
  382. *dst8 = ioread8(address);
  383. address++;
  384. dst8++;
  385. len--;
  386. }
  387. } else {
  388. len = len / 2;
  389. dst16 = (__le16 *)dstaddr;
  390. while (len) {
  391. *dst16 = cpu_to_le16(ioread16(address));
  392. address += 2;
  393. dst16++;
  394. len--;
  395. }
  396. }
  397. } else {
  398. len = len / 4;
  399. dst32 = (__le32 *)dstaddr;
  400. while (len) {
  401. *dst32 = cpu_to_le32(ioread32(address));
  402. address += 4;
  403. dst32++;
  404. len--;
  405. }
  406. }
  407. }
  408. #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
  409. CHIPCREGOFFS(reg), value)
  410. static void
  411. brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
  412. {
  413. const struct pci_dev *pdev = devinfo->pdev;
  414. struct brcmf_core *core;
  415. u32 bar0_win;
  416. core = brcmf_chip_get_core(devinfo->ci, coreid);
  417. if (core) {
  418. bar0_win = core->base;
  419. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
  420. if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
  421. &bar0_win) == 0) {
  422. if (bar0_win != core->base) {
  423. bar0_win = core->base;
  424. pci_write_config_dword(pdev,
  425. BRCMF_PCIE_BAR0_WINDOW,
  426. bar0_win);
  427. }
  428. }
  429. } else {
  430. brcmf_err("Unsupported core selected %x\n", coreid);
  431. }
  432. }
  433. static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
  434. {
  435. struct brcmf_core *core;
  436. u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
  437. BRCMF_PCIE_CFGREG_PM_CSR,
  438. BRCMF_PCIE_CFGREG_MSI_CAP,
  439. BRCMF_PCIE_CFGREG_MSI_ADDR_L,
  440. BRCMF_PCIE_CFGREG_MSI_ADDR_H,
  441. BRCMF_PCIE_CFGREG_MSI_DATA,
  442. BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
  443. BRCMF_PCIE_CFGREG_RBAR_CTRL,
  444. BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
  445. BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
  446. BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
  447. u32 i;
  448. u32 val;
  449. u32 lsc;
  450. if (!devinfo->ci)
  451. return;
  452. /* Disable ASPM */
  453. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  454. pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  455. &lsc);
  456. val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
  457. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  458. val);
  459. /* Watchdog reset */
  460. brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
  461. WRITECC32(devinfo, watchdog, 4);
  462. msleep(100);
  463. /* Restore ASPM */
  464. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  465. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
  466. lsc);
  467. core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
  468. if (core->rev <= 13) {
  469. for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
  470. brcmf_pcie_write_reg32(devinfo,
  471. BRCMF_PCIE_PCIE2REG_CONFIGADDR,
  472. cfg_offset[i]);
  473. val = brcmf_pcie_read_reg32(devinfo,
  474. BRCMF_PCIE_PCIE2REG_CONFIGDATA);
  475. brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
  476. cfg_offset[i], val);
  477. brcmf_pcie_write_reg32(devinfo,
  478. BRCMF_PCIE_PCIE2REG_CONFIGDATA,
  479. val);
  480. }
  481. }
  482. }
  483. static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
  484. {
  485. u32 config;
  486. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  487. /* BAR1 window may not be sized properly */
  488. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  489. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
  490. config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
  491. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
  492. device_wakeup_enable(&devinfo->pdev->dev);
  493. }
  494. static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
  495. {
  496. if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
  497. brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
  498. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  499. 5);
  500. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  501. 0);
  502. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  503. 7);
  504. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  505. 0);
  506. }
  507. return 0;
  508. }
  509. static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
  510. u32 resetintr)
  511. {
  512. struct brcmf_core *core;
  513. if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
  514. core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
  515. brcmf_chip_resetcore(core, 0, 0, 0);
  516. }
  517. if (!brcmf_chip_set_active(devinfo->ci, resetintr))
  518. return -EINVAL;
  519. return 0;
  520. }
  521. static int
  522. brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
  523. {
  524. struct brcmf_pcie_shared_info *shared;
  525. u32 addr;
  526. u32 cur_htod_mb_data;
  527. u32 i;
  528. shared = &devinfo->shared;
  529. addr = shared->htod_mb_data_addr;
  530. cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
  531. if (cur_htod_mb_data != 0)
  532. brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
  533. cur_htod_mb_data);
  534. i = 0;
  535. while (cur_htod_mb_data != 0) {
  536. msleep(10);
  537. i++;
  538. if (i > 100)
  539. return -EIO;
  540. cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
  541. }
  542. brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
  543. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
  544. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
  545. return 0;
  546. }
  547. static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
  548. {
  549. struct brcmf_pcie_shared_info *shared;
  550. u32 addr;
  551. u32 dtoh_mb_data;
  552. shared = &devinfo->shared;
  553. addr = shared->dtoh_mb_data_addr;
  554. dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
  555. if (!dtoh_mb_data)
  556. return;
  557. brcmf_pcie_write_tcm32(devinfo, addr, 0);
  558. brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
  559. if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
  560. brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
  561. brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
  562. brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
  563. }
  564. if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
  565. brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
  566. if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
  567. brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
  568. devinfo->mbdata_completed = true;
  569. wake_up(&devinfo->mbdata_resp_wait);
  570. }
  571. }
  572. static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
  573. {
  574. struct brcmf_pcie_shared_info *shared;
  575. struct brcmf_pcie_console *console;
  576. u32 addr;
  577. shared = &devinfo->shared;
  578. console = &shared->console;
  579. addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
  580. console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  581. addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
  582. console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  583. addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
  584. console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
  585. brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
  586. console->base_addr, console->buf_addr, console->bufsize);
  587. }
  588. static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
  589. {
  590. struct brcmf_pcie_console *console;
  591. u32 addr;
  592. u8 ch;
  593. u32 newidx;
  594. if (!BRCMF_FWCON_ON())
  595. return;
  596. console = &devinfo->shared.console;
  597. addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
  598. newidx = brcmf_pcie_read_tcm32(devinfo, addr);
  599. while (newidx != console->read_idx) {
  600. addr = console->buf_addr + console->read_idx;
  601. ch = brcmf_pcie_read_tcm8(devinfo, addr);
  602. console->read_idx++;
  603. if (console->read_idx == console->bufsize)
  604. console->read_idx = 0;
  605. if (ch == '\r')
  606. continue;
  607. console->log_str[console->log_idx] = ch;
  608. console->log_idx++;
  609. if ((ch != '\n') &&
  610. (console->log_idx == (sizeof(console->log_str) - 2))) {
  611. ch = '\n';
  612. console->log_str[console->log_idx] = ch;
  613. console->log_idx++;
  614. }
  615. if (ch == '\n') {
  616. console->log_str[console->log_idx] = 0;
  617. pr_debug("CONSOLE: %s", console->log_str);
  618. console->log_idx = 0;
  619. }
  620. }
  621. }
  622. static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
  623. {
  624. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
  625. }
  626. static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
  627. {
  628. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  629. BRCMF_PCIE_MB_INT_D2H_DB |
  630. BRCMF_PCIE_MB_INT_FN0_0 |
  631. BRCMF_PCIE_MB_INT_FN0_1);
  632. }
  633. static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
  634. {
  635. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  636. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
  637. brcmf_pcie_intr_disable(devinfo);
  638. brcmf_dbg(PCIE, "Enter\n");
  639. return IRQ_WAKE_THREAD;
  640. }
  641. return IRQ_NONE;
  642. }
/* Threaded IRQ handler: read and acknowledge the pending mailbox interrupt
 * status, dispatch mailbox-data and doorbell (msgbuf RX) work, drain the
 * firmware console, then re-enable interrupts (they were masked by
 * brcmf_pcie_quick_check_isr()) if the bus is still up.
 */
static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;	/* polled by brcmf_pcie_release_irq() */
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* ack all asserted sources before handling them */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	/* only re-arm interrupts while the bus is operational */
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
/* Try to enable MSI and install the two-stage (quick check + threaded)
 * interrupt handler. Returns 0 on success, -EIO if the IRQ cannot be
 * requested.
 */
static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	brcmf_dbg(PCIE, "Enter\n");
	/* NOTE(review): pci_enable_msi() result is ignored; pdev->irq is
	 * used below either way, so a failed MSI enable presumably falls
	 * back to legacy INTx — confirm this is intended.
	 */
	pci_enable_msi(pdev);
	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
				 brcmf_pcie_isr_thread, IRQF_SHARED,
				 "brcmf_pcie_intr", devinfo)) {
		pci_disable_msi(pdev);
		brcmf_err("Failed to request IRQ %d\n", pdev->irq);
		return -EIO;
	}
	devinfo->irq_allocated = true;
	return 0;
}
/* Tear down the interrupt: mask all sources, free the IRQ and MSI vector,
 * wait (up to ~1 s total) for a possibly still-running threaded handler to
 * finish, then acknowledge any status still pending in the device.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	/* in_irq is set/cleared by brcmf_pcie_isr_thread() */
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* clear leftover interrupt status so a later probe starts clean */
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);

	devinfo->irq_allocated = false;
}
  708. static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
  709. {
  710. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  711. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  712. struct brcmf_commonring *commonring = &ring->commonring;
  713. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  714. return -EIO;
  715. brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  716. commonring->w_ptr, ring->id);
  717. devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
  718. return 0;
  719. }
  720. static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
  721. {
  722. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  723. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  724. struct brcmf_commonring *commonring = &ring->commonring;
  725. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  726. return -EIO;
  727. brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  728. commonring->r_ptr, ring->id);
  729. devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
  730. return 0;
  731. }
  732. static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
  733. {
  734. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  735. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  736. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  737. return -EIO;
  738. brcmf_dbg(PCIE, "RING !\n");
  739. /* Any arbitrary value will do, lets use 1 */
  740. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
  741. return 0;
  742. }
  743. static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
  744. {
  745. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  746. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  747. struct brcmf_commonring *commonring = &ring->commonring;
  748. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  749. return -EIO;
  750. commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
  751. brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  752. commonring->w_ptr, ring->id);
  753. return 0;
  754. }
  755. static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
  756. {
  757. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  758. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  759. struct brcmf_commonring *commonring = &ring->commonring;
  760. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  761. return -EIO;
  762. commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
  763. brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  764. commonring->r_ptr, ring->id);
  765. return 0;
  766. }
  767. static void *
  768. brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
  769. u32 size, u32 tcm_dma_phys_addr,
  770. dma_addr_t *dma_handle)
  771. {
  772. void *ring;
  773. u64 address;
  774. ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
  775. GFP_KERNEL);
  776. if (!ring)
  777. return NULL;
  778. address = (u64)*dma_handle;
  779. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
  780. address & 0xffffffff);
  781. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
  782. memset(ring, 0, size);
  783. return (ring);
  784. }
/* Allocate the DMA backing store for common ring @ring_id, publish its DMA
 * address, max item count and item size into the ring descriptor at
 * @tcm_ring_phys_addr in device TCM, then allocate and configure the host
 * side ringbuf with the mailbox callbacks. Returns NULL on any allocation
 * failure (the DMA buffer is freed again in that case).
 */
static struct brcmf_pcie_ringbuf *
brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
			      u32 tcm_ring_phys_addr)
{
	void *dma_buf;
	dma_addr_t dma_handle;
	struct brcmf_pcie_ringbuf *ring;
	u32 size;
	u32 addr;

	size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
			&dma_handle);
	if (!dma_buf)
		return NULL;

	/* tell the firmware the ring geometry */
	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
				  dma_handle);
		return NULL;
	}
	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
				brcmf_ring_itemsize[ring_id], dma_buf);
	ring->dma_handle = dma_handle;
	ring->devinfo = devinfo;
	brcmf_commonring_register_cb(&ring->commonring,
				     brcmf_pcie_ring_mb_ring_bell,
				     brcmf_pcie_ring_mb_update_rptr,
				     brcmf_pcie_ring_mb_update_wptr,
				     brcmf_pcie_ring_mb_write_rptr,
				     brcmf_pcie_ring_mb_write_wptr, ring);
	return (ring);
}
  822. static void brcmf_pcie_release_ringbuffer(struct device *dev,
  823. struct brcmf_pcie_ringbuf *ring)
  824. {
  825. void *dma_buf;
  826. u32 size;
  827. if (!ring)
  828. return;
  829. dma_buf = ring->commonring.buf_addr;
  830. if (dma_buf) {
  831. size = ring->commonring.depth * ring->commonring.item_len;
  832. dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
  833. }
  834. kfree(ring);
  835. }
  836. static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
  837. {
  838. u32 i;
  839. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
  840. brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
  841. devinfo->shared.commonrings[i]);
  842. devinfo->shared.commonrings[i] = NULL;
  843. }
  844. kfree(devinfo->shared.flowrings);
  845. devinfo->shared.flowrings = NULL;
  846. if (devinfo->idxbuf) {
  847. dma_free_coherent(&devinfo->pdev->dev,
  848. devinfo->idxbuf_sz,
  849. devinfo->idxbuf,
  850. devinfo->idxbuf_dmahandle);
  851. devinfo->idxbuf = NULL;
  852. }
  853. }
/* Read the ring layout the firmware published in shared RAM, allocate the
 * common H2D/D2H message rings, set up the flowring book-keeping, and decide
 * where ring read/write indices live: in host memory (when the firmware
 * supports DMA'd indices, devinfo->dma_idx_sz != 0) or in device TCM.
 * Returns 0 on success or -ENOMEM after releasing everything allocated here.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u64 address;
	u32 bufsz;
	u16 max_sub_queues;
	u8 idx_offset;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);

	if (devinfo->dma_idx_sz != 0) {
		/* one read and one write index per H2D queue and D2H ring */
		bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
			devinfo->dma_idx_sz * 2;
		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
						     &devinfo->idxbuf_dmahandle,
						     GFP_KERNEL);
		if (!devinfo->idxbuf)
			devinfo->dma_idx_sz = 0; /* fall back to TCM indices */
	}

	if (devinfo->dma_idx_sz == 0) {
		/* indices live in device TCM; read their locations from the
		 * firmware-provided ring info block
		 */
		addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
		d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
		d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
		h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
		h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		idx_offset = sizeof(u32);
		devinfo->write_ptr = brcmf_pcie_write_tcm16;
		devinfo->read_ptr = brcmf_pcie_read_tcm16;
		brcmf_dbg(PCIE, "Using TCM indices\n");
	} else {
		/* indices live in the host idxbuf; carve it into four
		 * regions (H2D write, H2D read, D2H write, D2H read) and
		 * tell the firmware the DMA address of each region
		 */
		memset(devinfo->idxbuf, 0, bufsz);
		devinfo->idxbuf_sz = bufsz;
		idx_offset = devinfo->dma_idx_sz;
		devinfo->write_ptr = brcmf_pcie_write_idx;
		devinfo->read_ptr = brcmf_pcie_read_idx;

		h2d_w_idx_ptr = 0;
		addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
		address = (u64)devinfo->idxbuf_dmahandle;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
		address += max_sub_queues * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
		address += max_sub_queues * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
				BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
		address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
		brcmf_dbg(PCIE, "Using host memory indices\n");
	}

	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* allocate the H2D common rings (control submit, rxbuf post) */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* allocate the D2H common rings (completions) */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += idx_offset;
		d2h_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* flowrings get their DMA buffers later, on demand; only the
	 * book-keeping and index addresses are set up here
	 */
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating ring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
  983. static void
  984. brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  985. {
  986. if (devinfo->shared.scratch)
  987. dma_free_coherent(&devinfo->pdev->dev,
  988. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  989. devinfo->shared.scratch,
  990. devinfo->shared.scratch_dmahandle);
  991. if (devinfo->shared.ringupd)
  992. dma_free_coherent(&devinfo->pdev->dev,
  993. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  994. devinfo->shared.ringupd,
  995. devinfo->shared.ringupd_dmahandle);
  996. }
/* Allocate the D2H scratch and ring-update DMA buffers and publish each
 * buffer's 64-bit DMA address (low word, then high word) and length to the
 * firmware via the shared-RAM area in device TCM.
 * Returns 0 on success or -ENOMEM after releasing anything allocated here.
 */
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
	u64 address;
	u32 addr;

	devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
		&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.scratch)
		goto fail;

	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
	address = (u64)devinfo->shared.scratch_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
		&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.ringupd)
		goto fail;

	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
	address = (u64)devinfo->shared.ringupd_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
	return 0;

fail:
	brcmf_err("Allocating scratch buffers failed\n");
	brcmf_pcie_release_scratchbuffers(devinfo);
	return -ENOMEM;
}
/* Bus .stop callback: intentionally a no-op for PCIe. */
static void brcmf_pcie_down(struct device *dev)
{
}
/* Bus .txdata callback: stub that reports success without touching @skb.
 * Presumably the data path goes through the msgbuf protocol rings instead —
 * TODO(review): confirm against the msgbuf proto layer.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
/* Bus .txctl callback: stub, always returns success (control messages are
 * presumably handled by the msgbuf protocol — TODO confirm).
 */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
/* Bus .rxctl callback: stub, always returns success (see brcmf_pcie_tx_ctlpkt). */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
  1052. static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
  1053. {
  1054. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1055. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1056. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1057. brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
  1058. devinfo->wowl_enabled = enabled;
  1059. }
  1060. static size_t brcmf_pcie_get_ramsize(struct device *dev)
  1061. {
  1062. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1063. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1064. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1065. return devinfo->ci->ramsize - devinfo->ci->srsize;
  1066. }
  1067. static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
  1068. {
  1069. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1070. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1071. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1072. brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
  1073. brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
  1074. return 0;
  1075. }
/* Bus operations exposed to the common brcmfmac core. */
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
	.get_ramsize = brcmf_pcie_get_ramsize,
	.get_memdump = brcmf_pcie_get_memdump,
};
  1085. static void
  1086. brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
  1087. u32 data_len)
  1088. {
  1089. __le32 *field;
  1090. u32 newsize;
  1091. if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
  1092. return;
  1093. field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
  1094. if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
  1095. return;
  1096. field++;
  1097. newsize = le32_to_cpup(field);
  1098. brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
  1099. newsize);
  1100. devinfo->ci->ramsize = newsize;
  1101. }
/* Parse the shared-RAM info block the firmware placed at @sharedram_addr:
 * validate the protocol version, detect DMA-index support, and cache the
 * mailbox/ring-info addresses and RX parameters. Also initializes the
 * firmware console. Returns 0 or -EINVAL for an unsupported version.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}

	/* check whether firmware supports DMA'd ring indices, and how wide */
	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
			devinfo->dma_idx_sz = sizeof(u16);
		else
			devinfo->dma_idx_sz = sizeof(u32);
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
  1143. static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
  1144. const struct firmware *fw, void *nvram,
  1145. u32 nvram_len)
  1146. {
  1147. u32 sharedram_addr;
  1148. u32 sharedram_addr_written;
  1149. u32 loop_counter;
  1150. int err;
  1151. u32 address;
  1152. u32 resetintr;
  1153. brcmf_dbg(PCIE, "Halt ARM.\n");
  1154. err = brcmf_pcie_enter_download_state(devinfo);
  1155. if (err)
  1156. return err;
  1157. brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
  1158. brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
  1159. (void *)fw->data, fw->size);
  1160. resetintr = get_unaligned_le32(fw->data);
  1161. release_firmware(fw);
  1162. /* reset last 4 bytes of RAM address. to be used for shared
  1163. * area. This identifies when FW is running
  1164. */
  1165. brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
  1166. if (nvram) {
  1167. brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
  1168. address = devinfo->ci->rambase + devinfo->ci->ramsize -
  1169. nvram_len;
  1170. brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
  1171. brcmf_fw_nvram_free(nvram);
  1172. } else {
  1173. brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
  1174. devinfo->nvram_name);
  1175. }
  1176. sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
  1177. devinfo->ci->ramsize -
  1178. 4);
  1179. brcmf_dbg(PCIE, "Bring ARM in running state\n");
  1180. err = brcmf_pcie_exit_download_state(devinfo, resetintr);
  1181. if (err)
  1182. return err;
  1183. brcmf_dbg(PCIE, "Wait for FW init\n");
  1184. sharedram_addr = sharedram_addr_written;
  1185. loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
  1186. while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
  1187. msleep(50);
  1188. sharedram_addr = brcmf_pcie_read_ram32(devinfo,
  1189. devinfo->ci->ramsize -
  1190. 4);
  1191. loop_counter--;
  1192. }
  1193. if (sharedram_addr == sharedram_addr_written) {
  1194. brcmf_err("FW failed to initialize\n");
  1195. return -ENODEV;
  1196. }
  1197. brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
  1198. return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
  1199. }
/* Enable the PCI device, set bus mastering and map BAR0 (register window)
 * and BAR2 (TCM/memory window). Returns 0 on success or a negative error.
 * NOTE(review): ioremap_nocache() dates this file to pre-5.6 kernels.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);

	/* cleanup of a partial mapping is done in
	 * brcmf_pcie_release_resource() on the probe failure path
	 */
	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
		  devinfo->tcm, (unsigned long long)bar1_addr,
		  (unsigned int)bar1_size);

	return 0;
}
/* Undo brcmf_pcie_get_resource(): unmap whichever BARs were mapped and
 * disable the PCI device.
 */
static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->tcm)
		iounmap(devinfo->tcm);
	if (devinfo->regs)
		iounmap(devinfo->regs);

	pci_disable_device(devinfo->pdev);
}
  1246. static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo)
  1247. {
  1248. int ret;
  1249. /* Attach to the common driver interface */
  1250. ret = brcmf_attach(&devinfo->pdev->dev, devinfo->settings);
  1251. if (ret) {
  1252. brcmf_err("brcmf_attach failed\n");
  1253. } else {
  1254. ret = brcmf_bus_start(&devinfo->pdev->dev);
  1255. if (ret)
  1256. brcmf_err("dongle is not responding\n");
  1257. }
  1258. return ret;
  1259. }
  1260. static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
  1261. {
  1262. u32 ret_addr;
  1263. ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1264. addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1265. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
  1266. return ret_addr;
  1267. }
  1268. static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
  1269. {
  1270. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1271. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1272. return brcmf_pcie_read_reg32(devinfo, addr);
  1273. }
  1274. static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
  1275. {
  1276. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1277. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1278. brcmf_pcie_write_reg32(devinfo, addr, value);
  1279. }
/* Buscore .prepare callback: map PCI resources (ctx is the devinfo). */
static int brcmf_pcie_buscoreprep(void *ctx)
{
	return brcmf_pcie_get_resource(ctx);
}
/* Buscore .reset callback: reset the device, then acknowledge any pending
 * mailbox interrupt. A read of 0xffffffff indicates the register is not
 * accessible (typical all-ones PCI read from an absent/reset device), so
 * the write-back is skipped in that case.
 */
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
	u32 val;

	devinfo->ci = chip;
	brcmf_pcie_reset_device(devinfo);

	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	if (val != 0xffffffff)
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       val);

	return 0;
}
  1296. static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
  1297. u32 rstvec)
  1298. {
  1299. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1300. brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
  1301. }
/* Chip/buscore callbacks used by brcmf_chip_attach(). */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
/* Firmware-request completion callback (runs after probe, once fw/nvram
 * have been loaded): download the firmware, bring up the rings, scratch
 * buffers and interrupts, hook everything into the msgbuf layer and attach
 * the common driver. On any failure the driver is released from the device,
 * which triggers brcmf_pcie_remove() for cleanup.
 * Ownership of @fw and @nvram passes to brcmf_pcie_download_fw_nvram().
 */
static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
			     void *nvram, u32 nvram_len)
{
	struct brcmf_bus *bus = dev_get_drvdata(dev);
	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
	struct brcmf_commonring **flowrings;
	int ret;
	u32 i;

	brcmf_pcie_attach(devinfo);

	/* Some of the firmwares have the size of the memory of the device
	 * defined inside the firmware. This is because part of the memory in
	 * the device is shared and the devision is determined by FW. Parse
	 * the firmware and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	brcmf_pcie_intr_enable(devinfo);
	if (brcmf_pcie_attach_bus(devinfo) == 0)
		return;

	/* attach failed: dump the firmware console for diagnosis */
	brcmf_pcie_bus_console_read(devinfo);

fail:
	device_release_driver(dev);
}
  1361. static int
  1362. brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  1363. {
  1364. int ret;
  1365. struct brcmf_pciedev_info *devinfo;
  1366. struct brcmf_pciedev *pcie_bus_dev;
  1367. struct brcmf_bus *bus;
  1368. u16 domain_nr;
  1369. u16 bus_nr;
  1370. domain_nr = pci_domain_nr(pdev->bus) + 1;
  1371. bus_nr = pdev->bus->number;
  1372. brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
  1373. domain_nr, bus_nr);
  1374. ret = -ENOMEM;
  1375. devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
  1376. if (devinfo == NULL)
  1377. return ret;
  1378. devinfo->pdev = pdev;
  1379. pcie_bus_dev = NULL;
  1380. devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
  1381. if (IS_ERR(devinfo->ci)) {
  1382. ret = PTR_ERR(devinfo->ci);
  1383. devinfo->ci = NULL;
  1384. goto fail;
  1385. }
  1386. pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
  1387. if (pcie_bus_dev == NULL) {
  1388. ret = -ENOMEM;
  1389. goto fail;
  1390. }
  1391. devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
  1392. BRCMF_BUSTYPE_PCIE,
  1393. devinfo->ci->chip,
  1394. devinfo->ci->chiprev);
  1395. if (!devinfo->settings) {
  1396. ret = -ENOMEM;
  1397. goto fail;
  1398. }
  1399. bus = kzalloc(sizeof(*bus), GFP_KERNEL);
  1400. if (!bus) {
  1401. ret = -ENOMEM;
  1402. goto fail;
  1403. }
  1404. bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
  1405. if (!bus->msgbuf) {
  1406. ret = -ENOMEM;
  1407. kfree(bus);
  1408. goto fail;
  1409. }
  1410. /* hook it all together. */
  1411. pcie_bus_dev->devinfo = devinfo;
  1412. pcie_bus_dev->bus = bus;
  1413. bus->dev = &pdev->dev;
  1414. bus->bus_priv.pcie = pcie_bus_dev;
  1415. bus->ops = &brcmf_pcie_bus_ops;
  1416. bus->proto_type = BRCMF_PROTO_MSGBUF;
  1417. bus->chip = devinfo->coreid;
  1418. bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
  1419. dev_set_drvdata(&pdev->dev, bus);
  1420. ret = brcmf_fw_map_chip_to_name(devinfo->ci->chip, devinfo->ci->chiprev,
  1421. brcmf_pcie_fwnames,
  1422. ARRAY_SIZE(brcmf_pcie_fwnames),
  1423. devinfo->fw_name, devinfo->nvram_name);
  1424. if (ret)
  1425. goto fail_bus;
  1426. ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
  1427. BRCMF_FW_REQ_NV_OPTIONAL,
  1428. devinfo->fw_name, devinfo->nvram_name,
  1429. brcmf_pcie_setup, domain_nr, bus_nr);
  1430. if (ret == 0)
  1431. return 0;
  1432. fail_bus:
  1433. kfree(bus->msgbuf);
  1434. kfree(bus);
  1435. fail:
  1436. brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
  1437. brcmf_pcie_release_resource(devinfo);
  1438. if (devinfo->ci)
  1439. brcmf_chip_detach(devinfo->ci);
  1440. if (devinfo->settings)
  1441. brcmf_release_module_param(devinfo->settings);
  1442. kfree(pcie_bus_dev);
  1443. kfree(devinfo);
  1444. return ret;
  1445. }
/* PCI remove: detach the common driver, then release IRQs, DMA buffers,
 * rings and PCI resources in reverse order of acquisition. Also runs when
 * brcmf_pcie_setup() fails and calls device_release_driver().
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
  1476. #ifdef CONFIG_PM
  1477. static int brcmf_pcie_pm_enter_D3(struct device *dev)
  1478. {
  1479. struct brcmf_pciedev_info *devinfo;
  1480. struct brcmf_bus *bus;
  1481. brcmf_dbg(PCIE, "Enter\n");
  1482. bus = dev_get_drvdata(dev);
  1483. devinfo = bus->bus_priv.pcie->devinfo;
  1484. brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
  1485. devinfo->mbdata_completed = false;
  1486. brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
  1487. wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
  1488. BRCMF_PCIE_MBDATA_TIMEOUT);
  1489. if (!devinfo->mbdata_completed) {
  1490. brcmf_err("Timeout on response for entering D3 substate\n");
  1491. return -EIO;
  1492. }
  1493. devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
  1494. return 0;
  1495. }
  1496. static int brcmf_pcie_pm_leave_D3(struct device *dev)
  1497. {
  1498. struct brcmf_pciedev_info *devinfo;
  1499. struct brcmf_bus *bus;
  1500. struct pci_dev *pdev;
  1501. int err;
  1502. brcmf_dbg(PCIE, "Enter\n");
  1503. bus = dev_get_drvdata(dev);
  1504. devinfo = bus->bus_priv.pcie->devinfo;
  1505. brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
  1506. /* Check if device is still up and running, if so we are ready */
  1507. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
  1508. brcmf_dbg(PCIE, "Try to wakeup device....\n");
  1509. if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
  1510. goto cleanup;
  1511. brcmf_dbg(PCIE, "Hot resume, continue....\n");
  1512. devinfo->state = BRCMFMAC_PCIE_STATE_UP;
  1513. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  1514. brcmf_bus_change_state(bus, BRCMF_BUS_UP);
  1515. brcmf_pcie_intr_enable(devinfo);
  1516. return 0;
  1517. }
  1518. cleanup:
  1519. brcmf_chip_detach(devinfo->ci);
  1520. devinfo->ci = NULL;
  1521. pdev = devinfo->pdev;
  1522. brcmf_pcie_remove(pdev);
  1523. err = brcmf_pcie_probe(pdev, NULL);
  1524. if (err)
  1525. brcmf_err("probe after resume failed, err=%d\n", err);
  1526. return err;
  1527. }
/* PM callbacks: suspend/freeze enter the D3 substate, resume/restore
 * leave it (re-probing the device if it lost state — see
 * brcmf_pcie_pm_leave_D3()).
 */
static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};
  1534. #endif /* CONFIG_PM */
/* Match a Broadcom PCIe network device by device id (any subsystem). */
#define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
/* As above, but restricted to a specific subsystem vendor/device pair. */
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
  1540. static struct pci_device_id brcmf_pcie_devid_table[] = {
  1541. BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
  1542. BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
  1543. BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
  1544. BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
  1545. BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
  1546. BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
  1547. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
  1548. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
  1549. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
  1550. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
  1551. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
  1552. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
  1553. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
  1554. BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
  1555. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
  1556. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
  1557. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
  1558. BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
  1559. { /* end: all zeroes */ }
  1560. };
  1561. MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
  1562. static struct pci_driver brcmf_pciedrvr = {
  1563. .node = {},
  1564. .name = KBUILD_MODNAME,
  1565. .id_table = brcmf_pcie_devid_table,
  1566. .probe = brcmf_pcie_probe,
  1567. .remove = brcmf_pcie_remove,
  1568. #ifdef CONFIG_PM
  1569. .driver.pm = &brcmf_pciedrvr_pm,
  1570. #endif
  1571. };
  1572. void brcmf_pcie_register(void)
  1573. {
  1574. int err;
  1575. brcmf_dbg(PCIE, "Enter\n");
  1576. err = pci_register_driver(&brcmf_pciedrvr);
  1577. if (err)
  1578. brcmf_err("PCIE driver registration failed, err=%d\n", err);
  1579. }
/* Unregister the PCIe driver; counterpart of brcmf_pcie_register(). */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}