pcie.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901
  1. /* Copyright (c) 2014 Broadcom Corporation
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/firmware.h>
  18. #include <linux/pci.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/delay.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/bcma/bcma.h>
  23. #include <linux/sched.h>
  24. #include <asm/unaligned.h>
  25. #include <soc.h>
  26. #include <chipcommon.h>
  27. #include <brcmu_utils.h>
  28. #include <brcmu_wifi.h>
  29. #include <brcm_hw_ids.h>
  30. #include "debug.h"
  31. #include "bus.h"
  32. #include "commonring.h"
  33. #include "msgbuf.h"
  34. #include "pcie.h"
  35. #include "firmware.h"
  36. #include "chip.h"
/* State of the PCIe bus link towards the firmware. */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
/* Firmware and NVRAM image names, one pair per supported chip. */
#define BRCMF_PCIE_43602_FW_NAME		"brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME		"brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4354_FW_NAME			"brcm/brcmfmac4354-pcie.bin"
#define BRCMF_PCIE_4354_NVRAM_NAME		"brcm/brcmfmac4354-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME			"brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"

/* How long to wait for the firmware to come up after download. */
#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

/* BAR1 (TCM) and BAR0 (register) mapping sizes. */
#define BRCMF_PCIE_TCM_MAP_SIZE			(4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000

/* ARM CR4 core registers used while putting the chip in download state. */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

/* Gen-1 interrupt registers, accessed through PCI config space. */
#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

/* Gen-2 PCIe core registers, accessed through the BAR0 mapping. */
#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX		0x140

/* PCIe core generation (selects gen-1 vs gen-2 register model). */
#define BRCMF_PCIE_GENREV1			1
#define BRCMF_PCIE_GENREV2			2

#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

/* Mailbox interrupt bits: function-0 events and D2H doorbells. */
#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

/* Version and capability flags of the shared (host/firmware) structure. */
#define BRCMF_PCIE_MIN_SHARED_VERSION		4
#define BRCMF_PCIE_MAX_SHARED_VERSION		5
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT	0x4000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000

/* Byte offsets of fields inside the shared structure in TCM. */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

/* Byte offsets inside the ring info / ring mem / ring state structures. */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET	4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET	8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET	12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET	16
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET	0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES	52

#define BRCMF_DEF_MAX_RXBUFPOST			255

/* Firmware console descriptor field offsets (in TCM). */
#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

/* Device-to-host and host-to-device mailbox data bits. */
#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004

#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
#define BRCMF_H2D_HOST_D0_INFORM		0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT		2000

/* PCIe config registers saved/restored across a device reset. */
#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
  154. MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
  155. MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
  156. MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
  157. MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
  158. MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
  159. MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
/* Host-side view of the firmware console that lives in device TCM. */
struct brcmf_pcie_console {
	u32 base_addr;		/* TCM address of the console descriptor */
	u32 buf_addr;		/* TCM address of the circular log buffer */
	u32 bufsize;		/* size of the circular log buffer */
	u32 read_idx;		/* host read index into the buffer */
	u8 log_str[256];	/* line-assembly buffer for host-side logging */
	u8 log_idx;		/* current write position within log_str */
};
/* Host-side mirror of the shared (host/firmware) structure in TCM. */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;	/* TCM offset of the shared structure */
	u32 flags;		/* capability flags read from firmware */
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;	/* max rx buffers firmware wants posted */
	u32 nrof_flowrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;	/* TCM address of host-to-device mbox word */
	u32 dtoh_mb_data_addr;	/* TCM address of device-to-host mbox word */
	u32 ring_info_addr;	/* TCM address of the ring info structure */
	struct brcmf_pcie_console console;
	void *scratch;		/* host DMA scratch buffer */
	dma_addr_t scratch_dmahandle;
	void *ringupd;		/* host DMA ring-update buffer */
	dma_addr_t ringupd_dmahandle;
};
/* Backplane base and wrapper addresses of a chip core. */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
/* Per-device state for a brcmfmac PCIe device. */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;	/* bus link state towards firmware */
	bool in_irq;			/* threaded ISR currently executing */
	bool irq_requested;		/* request_threaded_irq() succeeded */
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	void __iomem *regs;		/* BAR0 mapping: PCIe core registers */
	void __iomem *tcm;		/* BAR1 mapping: device TCM */
	u32 tcm_size;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	u32 generic_corerev;		/* BRCMF_PCIE_GENREV1 or _GENREV2 */
	struct brcmf_pcie_shared_info shared;
	void (*ringbell)(struct brcmf_pciedev_info *devinfo); /* v1 or v2 */
	wait_queue_head_t mbdata_resp_wait; /* woken on D3 ACK from device */
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
};
/* One message ring: common ring state plus its DMA buffer and the TCM
 * addresses holding the ring's read/write indices.
 */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;	/* DMA address of the ring buffer */
	u32 w_idx_addr;		/* TCM address of the write index */
	u32 r_idx_addr;		/* TCM address of the read index */
	struct brcmf_pciedev_info *devinfo;
	u8 id;			/* ring identifier */
};
/* Maximum number of items per common msgring, indexed by ring id. */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};
/* Item size in bytes per common msgring, indexed by ring id. */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
/* DMA flushing would need an implementation for mips and arm platforms
 * (and should then live in a shared util). Note this is not real cache
 * flushing: the memory is mapped non-cached, so only write buffers
 * should need draining — though this may differ per platform. Both
 * macros are therefore no-ops today.
 */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
  240. static u32
  241. brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
  242. {
  243. void __iomem *address = devinfo->regs + reg_offset;
  244. return (ioread32(address));
  245. }
  246. static void
  247. brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
  248. u32 value)
  249. {
  250. void __iomem *address = devinfo->regs + reg_offset;
  251. iowrite32(value, address);
  252. }
  253. static u8
  254. brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  255. {
  256. void __iomem *address = devinfo->tcm + mem_offset;
  257. return (ioread8(address));
  258. }
  259. static u16
  260. brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  261. {
  262. void __iomem *address = devinfo->tcm + mem_offset;
  263. return (ioread16(address));
  264. }
  265. static void
  266. brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  267. u16 value)
  268. {
  269. void __iomem *address = devinfo->tcm + mem_offset;
  270. iowrite16(value, address);
  271. }
  272. static u32
  273. brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  274. {
  275. void __iomem *address = devinfo->tcm + mem_offset;
  276. return (ioread32(address));
  277. }
  278. static void
  279. brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  280. u32 value)
  281. {
  282. void __iomem *address = devinfo->tcm + mem_offset;
  283. iowrite32(value, address);
  284. }
  285. static u32
  286. brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  287. {
  288. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  289. return (ioread32(addr));
  290. }
  291. static void
  292. brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  293. u32 value)
  294. {
  295. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  296. iowrite32(value, addr);
  297. }
  298. static void
  299. brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  300. void *srcaddr, u32 len)
  301. {
  302. void __iomem *address = devinfo->tcm + mem_offset;
  303. __le32 *src32;
  304. __le16 *src16;
  305. u8 *src8;
  306. if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
  307. if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
  308. src8 = (u8 *)srcaddr;
  309. while (len) {
  310. iowrite8(*src8, address);
  311. address++;
  312. src8++;
  313. len--;
  314. }
  315. } else {
  316. len = len / 2;
  317. src16 = (__le16 *)srcaddr;
  318. while (len) {
  319. iowrite16(le16_to_cpu(*src16), address);
  320. address += 2;
  321. src16++;
  322. len--;
  323. }
  324. }
  325. } else {
  326. len = len / 4;
  327. src32 = (__le32 *)srcaddr;
  328. while (len) {
  329. iowrite32(le32_to_cpu(*src32), address);
  330. address += 4;
  331. src32++;
  332. len--;
  333. }
  334. }
  335. }
/* Write @value to chipcommon register @reg; the chipcommon core must be
 * selected into the BAR0 window first (see brcmf_pcie_select_core()).
 */
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
							      CHIPCREGOFFS(reg), value)
/*
 * Point the BAR0 window at the backplane base address of @coreid so
 * that subsequent BAR0 register accesses hit that core.
 *
 * After writing, the window register is read back and rewritten once if
 * the value did not stick (NOTE(review): presumably a hardware quirk on
 * some chips — confirm against chip errata).
 */
static void
brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
{
	const struct pci_dev *pdev = devinfo->pdev;
	struct brcmf_core *core;
	u32 bar0_win;

	core = brcmf_chip_get_core(devinfo->ci, coreid);
	if (core) {
		bar0_win = core->base;
		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
					  &bar0_win) == 0) {
			/* retry once if the readback disagrees */
			if (bar0_win != core->base) {
				bar0_win = core->base;
				pci_write_config_dword(pdev,
						       BRCMF_PCIE_BAR0_WINDOW,
						       bar0_win);
			}
		}
	} else {
		brcmf_err("Unsupported core selected %x\n", coreid);
	}
}
/*
 * Reset the device via the chipcommon watchdog while keeping selected
 * PCIe config registers intact across the reset.
 *
 * Sequence: temporarily disable ASPM, arm the watchdog (fires after 4
 * ticks), wait 100 ms for the reset, restore the saved link status /
 * control value, then read back and rewrite each register in
 * cfg_offset[] through the indirect CONFIGADDR/CONFIGDATA pair to
 * re-latch its post-reset value.
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* save link-status/control and clear the ASPM enable bits */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);

	/* trigger the watchdog reset and wait for it to complete */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* restore the pre-reset link-status/control value */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);

	/* read-modify-write each config register to re-latch it */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
				       cfg_offset[i]);
		val = brcmf_pcie_read_reg32(devinfo,
					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
			  cfg_offset[i], val);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
				       val);
	}
}
/*
 * Early attach: if the device appears to be live (non-zero interrupt
 * mask suggests firmware was already running), reset it; then re-latch
 * the BAR2 config register and enable wakeup on the PCI device.
 */
static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
{
	u32 config;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
		brcmf_pcie_reset_device(devinfo);
	/* BAR1 window may not be sized properly */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);

	device_wakeup_enable(&devinfo->pdev->dev);
}
/*
 * Put the chip into firmware download state. On 43602, additionally
 * clear the ARM CR4 bank PDA for banks 5 and 7 (NOTE(review): looks
 * like a chip-specific memory powerdown/init quirk — confirm).
 *
 * Return: always 0.
 */
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
{
	brcmf_chip_enter_download(devinfo->ci);

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       5);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       7);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
	}
	return 0;
}
/*
 * Take the chip out of download state and start the firmware, passing
 * @resetintr as the reset vector. On 43602 the internal memory core is
 * reset first.
 *
 * Return: 0 on success, nonzero if brcmf_chip_exit_download() reported
 * success == false (note the inverted boolean convention here).
 */
static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
					  u32 resetintr)
{
	struct brcmf_core *core;

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
		brcmf_chip_resetcore(core, 0, 0, 0);
	}

	return !brcmf_chip_exit_download(devinfo->ci, resetintr);
}
/*
 * Write @htod_mb_data to the host-to-device mailbox word in TCM and
 * ring the SB mailbox doorbell.
 *
 * If a previous value is still pending, poll every 10 ms (up to ~1 s)
 * for the device to consume it.
 *
 * Return: 0 on success, -EIO if the pending value never drained.
 */
static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	addr = shared->htod_mb_data_addr;
	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (cur_htod_mb_data != 0)
		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
			  cur_htod_mb_data);

	i = 0;
	while (cur_htod_mb_data != 0) {
		msleep(10);
		i++;
		if (i > 100)
			return -EIO;
		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
	}

	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
	/* doorbell is written twice — NOTE(review): looks deliberate
	 * (hardware workaround?); confirm before changing.
	 */
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);

	return 0;
}
/*
 * Read and clear the device-to-host mailbox word, then act on the bits:
 * deep-sleep entry requests are ACKed back to the device, deep-sleep
 * exit is only logged, and a D3 ACK wakes anyone waiting on
 * mbdata_resp_wait (i.e. a suspend in progress).
 */
static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (!dtoh_mb_data)
		return;

	/* clear so the device can post the next event */
	brcmf_pcie_write_tcm32(devinfo, addr, 0);

	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
			devinfo->mbdata_completed = true;
			wake_up(&devinfo->mbdata_resp_wait);
		}
	}
}
/*
 * Locate the firmware console in TCM: read the console descriptor
 * address from the shared structure, then the log buffer address and
 * size from the descriptor.
 */
static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	struct brcmf_pcie_console *console;
	u32 addr;

	shared = &devinfo->shared;
	console = &shared->console;
	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
		  console->base_addr, console->buf_addr, console->bufsize);
}
/*
 * Drain the firmware console ring buffer up to the firmware's write
 * index, assembling characters into log_str and emitting one debug
 * line per '\n'. Carriage returns are dropped; a line that would
 * overflow log_str is force-terminated with a '\n' two bytes before
 * the end (leaving room for the NUL).
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	console = &devinfo->shared.console;
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;	/* wrap the circular buffer */
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
			console->log_idx = 0;
		}
	}
}
  544. static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
  545. {
  546. u32 reg_value;
  547. brcmf_dbg(PCIE, "RING !\n");
  548. reg_value = brcmf_pcie_read_reg32(devinfo,
  549. BRCMF_PCIE_PCIE2REG_MAILBOXINT);
  550. reg_value |= BRCMF_PCIE2_INTB;
  551. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
  552. reg_value);
  553. }
  554. static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
  555. {
  556. brcmf_dbg(PCIE, "RING !\n");
  557. /* Any arbitrary value will do, lets use 1 */
  558. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
  559. }
  560. static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
  561. {
  562. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  563. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  564. 0);
  565. else
  566. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  567. 0);
  568. }
  569. static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
  570. {
  571. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  572. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  573. BRCMF_PCIE_INT_DEF);
  574. else
  575. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  576. BRCMF_PCIE_MB_INT_D2H_DB |
  577. BRCMF_PCIE_MB_INT_FN0_0 |
  578. BRCMF_PCIE_MB_INT_FN0_1);
  579. }
  580. static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
  581. {
  582. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  583. u32 status;
  584. status = 0;
  585. pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
  586. if (status) {
  587. brcmf_pcie_intr_disable(devinfo);
  588. brcmf_dbg(PCIE, "Enter\n");
  589. return IRQ_WAKE_THREAD;
  590. }
  591. return IRQ_NONE;
  592. }
  593. static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
  594. {
  595. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  596. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
  597. brcmf_pcie_intr_disable(devinfo);
  598. brcmf_dbg(PCIE, "Enter\n");
  599. return IRQ_WAKE_THREAD;
  600. }
  601. return IRQ_NONE;
  602. }
/*
 * Threaded ISR for gen-1: acknowledge the latched interrupt status via
 * PCI config space, kick msgbuf rx processing while the bus is up, and
 * re-enable interrupts (they were masked by the quick-check handler).
 */
static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	const struct pci_dev *pdev = devinfo->pdev;
	u32 status;

	devinfo->in_irq = true;
	status = 0;
	pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* write status back to clear the handled bits */
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
	}
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
/*
 * Threaded ISR for gen-2: acknowledge the mailbox interrupt status,
 * dispatch function-0 events to the mailbox handler and D2H doorbells
 * to msgbuf rx processing, drain the firmware console, and re-enable
 * interrupts while the bus is up.
 */
static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* write status back to clear the handled bits */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
  647. static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
  648. {
  649. struct pci_dev *pdev;
  650. pdev = devinfo->pdev;
  651. brcmf_pcie_intr_disable(devinfo);
  652. brcmf_dbg(PCIE, "Enter\n");
  653. /* is it a v1 or v2 implementation */
  654. devinfo->irq_requested = false;
  655. pci_enable_msi(pdev);
  656. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
  657. if (request_threaded_irq(pdev->irq,
  658. brcmf_pcie_quick_check_isr_v1,
  659. brcmf_pcie_isr_thread_v1,
  660. IRQF_SHARED, "brcmf_pcie_intr",
  661. devinfo)) {
  662. pci_disable_msi(pdev);
  663. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  664. return -EIO;
  665. }
  666. } else {
  667. if (request_threaded_irq(pdev->irq,
  668. brcmf_pcie_quick_check_isr_v2,
  669. brcmf_pcie_isr_thread_v2,
  670. IRQF_SHARED, "brcmf_pcie_intr",
  671. devinfo)) {
  672. pci_disable_msi(pdev);
  673. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  674. return -EIO;
  675. }
  676. }
  677. devinfo->irq_requested = true;
  678. devinfo->irq_allocated = true;
  679. return 0;
  680. }
/*
 * Free the device interrupt and wait (bounded) for a threaded handler
 * that may still be running. Any interrupt status still latched in the
 * hardware is cleared at the end so a later re-request starts clean.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	if (!devinfo->irq_requested)
		return;
	devinfo->irq_requested = false;
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	/* poll up to ~1s for the in_irq flag set by the threaded ISRs */
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* clear whatever interrupt status is still pending in hardware */
	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
		status = 0;
		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
	} else {
		status = brcmf_pcie_read_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
	}
	devinfo->irq_allocated = false;
}
  715. static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
  716. {
  717. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  718. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  719. struct brcmf_commonring *commonring = &ring->commonring;
  720. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  721. return -EIO;
  722. brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  723. commonring->w_ptr, ring->id);
  724. brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
  725. return 0;
  726. }
  727. static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
  728. {
  729. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  730. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  731. struct brcmf_commonring *commonring = &ring->commonring;
  732. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  733. return -EIO;
  734. brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  735. commonring->r_ptr, ring->id);
  736. brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
  737. return 0;
  738. }
  739. static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
  740. {
  741. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  742. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  743. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  744. return -EIO;
  745. devinfo->ringbell(devinfo);
  746. return 0;
  747. }
  748. static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
  749. {
  750. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  751. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  752. struct brcmf_commonring *commonring = &ring->commonring;
  753. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  754. return -EIO;
  755. commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
  756. brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  757. commonring->w_ptr, ring->id);
  758. return 0;
  759. }
  760. static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
  761. {
  762. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  763. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  764. struct brcmf_commonring *commonring = &ring->commonring;
  765. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  766. return -EIO;
  767. commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
  768. brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  769. commonring->r_ptr, ring->id);
  770. return 0;
  771. }
  772. static void *
  773. brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
  774. u32 size, u32 tcm_dma_phys_addr,
  775. dma_addr_t *dma_handle)
  776. {
  777. void *ring;
  778. long long address;
  779. ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
  780. GFP_KERNEL);
  781. if (!ring)
  782. return NULL;
  783. address = (long long)(long)*dma_handle;
  784. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
  785. address & 0xffffffff);
  786. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
  787. memset(ring, 0, size);
  788. return (ring);
  789. }
/*
 * Allocate one message ring: the DMA buffer shared with the device plus
 * the host-side bookkeeping structure. Ring geometry (item count and
 * item size from the brcmf_ring_* tables) is programmed into the device
 * TCM ring descriptor at @tcm_ring_phys_addr.
 *
 * Returns the ring, or NULL on allocation failure (nothing leaks).
 */
static struct brcmf_pcie_ringbuf *
brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
			      u32 tcm_ring_phys_addr)
{
	void *dma_buf;
	dma_addr_t dma_handle;
	struct brcmf_pcie_ringbuf *ring;
	u32 size;
	u32 addr;

	size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
	/* the helper also writes the buffer's bus address into TCM */
	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
			&dma_handle);
	if (!dma_buf)
		return NULL;

	/* publish ring geometry to the device */
	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
				  dma_handle);
		return NULL;
	}
	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
				brcmf_ring_itemsize[ring_id], dma_buf);
	ring->dma_handle = dma_handle;
	ring->devinfo = devinfo;
	brcmf_commonring_register_cb(&ring->commonring,
				     brcmf_pcie_ring_mb_ring_bell,
				     brcmf_pcie_ring_mb_update_rptr,
				     brcmf_pcie_ring_mb_update_wptr,
				     brcmf_pcie_ring_mb_write_rptr,
				     brcmf_pcie_ring_mb_write_wptr, ring);
	return (ring);
}
  827. static void brcmf_pcie_release_ringbuffer(struct device *dev,
  828. struct brcmf_pcie_ringbuf *ring)
  829. {
  830. void *dma_buf;
  831. u32 size;
  832. if (!ring)
  833. return;
  834. dma_buf = ring->commonring.buf_addr;
  835. if (dma_buf) {
  836. size = ring->commonring.depth * ring->commonring.item_len;
  837. dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
  838. }
  839. kfree(ring);
  840. }
  841. static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
  842. {
  843. u32 i;
  844. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
  845. brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
  846. devinfo->shared.commonrings[i]);
  847. devinfo->shared.commonrings[i] = NULL;
  848. }
  849. kfree(devinfo->shared.flowrings);
  850. devinfo->shared.flowrings = NULL;
  851. }
/*
 * Build all message rings from the ring-info structure the firmware
 * exposes in TCM. The common H2D and D2H rings get DMA buffers here;
 * the flow rings only get host-side bookkeeping and callbacks in this
 * function (no DMA buffer is allocated for them here).
 *
 * The read/write index slots for consecutive rings are consecutive u32
 * locations starting at the pointers read from the ring info.
 *
 * Returns 0 on success, -ENOMEM on failure (partial work is undone).
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u16 max_sub_queues;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);

	/* fetch index-array and ring-memory locations from the device */
	addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
	d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
	d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
	h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
	h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* host-to-device common rings */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* device-to-host common rings */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += sizeof(u32);
		d2h_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* flow rings: everything beyond the common H2D rings */
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		/* flow rings continue the H2D index array */
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating commonring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
  935. static void
  936. brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  937. {
  938. if (devinfo->shared.scratch)
  939. dma_free_coherent(&devinfo->pdev->dev,
  940. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  941. devinfo->shared.scratch,
  942. devinfo->shared.scratch_dmahandle);
  943. if (devinfo->shared.ringupd)
  944. dma_free_coherent(&devinfo->pdev->dev,
  945. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  946. devinfo->shared.ringupd,
  947. devinfo->shared.ringupd_dmahandle);
  948. }
  949. static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  950. {
  951. long long address;
  952. u32 addr;
  953. devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
  954. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  955. &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
  956. if (!devinfo->shared.scratch)
  957. goto fail;
  958. memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  959. brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  960. addr = devinfo->shared.tcm_base_address +
  961. BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
  962. address = (long long)(long)devinfo->shared.scratch_dmahandle;
  963. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  964. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  965. addr = devinfo->shared.tcm_base_address +
  966. BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
  967. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  968. devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
  969. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  970. &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
  971. if (!devinfo->shared.ringupd)
  972. goto fail;
  973. memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  974. brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  975. addr = devinfo->shared.tcm_base_address +
  976. BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
  977. address = (long long)(long)devinfo->shared.ringupd_dmahandle;
  978. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  979. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  980. addr = devinfo->shared.tcm_base_address +
  981. BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
  982. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  983. return 0;
  984. fail:
  985. brcmf_err("Allocating scratch buffers failed\n");
  986. brcmf_pcie_release_scratchbuffers(devinfo);
  987. return -ENOMEM;
  988. }
/* Bus "stop" callback — intentionally a no-op for the PCIe bus. */
static void brcmf_pcie_down(struct device *dev)
{
}
/* Bus data-tx callback. Placeholder that reports success; the PCIe data
 * path goes through the msgbuf commonrings instead.
 * NOTE(review): @skb is neither consumed nor freed here — confirm this
 * op is never reached on the PCIe/msgbuf path.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
/* Bus control-tx callback. Placeholder that reports success; control
 * messages travel via the msgbuf protocol on PCIe.
 */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
/* Bus control-rx callback. Placeholder that reports success; control
 * responses arrive via the msgbuf protocol on PCIe.
 */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
  1006. static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
  1007. {
  1008. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1009. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1010. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1011. brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
  1012. devinfo->wowl_enabled = enabled;
  1013. if (enabled)
  1014. device_set_wakeup_enable(&devinfo->pdev->dev, true);
  1015. else
  1016. device_set_wakeup_enable(&devinfo->pdev->dev, false);
  1017. }
/* Bus operations exposed to the common brcmf core. The txdata/txctl/
 * rxctl entries are placeholder implementations; traffic on PCIe is
 * carried by the msgbuf protocol rings.
 */
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
};
/*
 * Parse the shared-RAM info structure the firmware placed at
 * @sharedram_addr in TCM: validate the protocol version and feature
 * flags, then cache the mailbox/ring/rx parameters in devinfo->shared.
 *
 * Returns 0 on success, -EINVAL for an unsupported firmware layout.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	/* first shared word: flags, protocol version in the low bits */
	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}
	if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
		brcmf_err("Unsupported legacy TX mode 0x%x\n",
			  shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
		return -EINVAL;
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		/* firmware left it unset: fall back to the driver default */
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
/*
 * Select firmware and NVRAM file names for the detected chip and build
 * the full paths in devinfo->fw_name / devinfo->nvram_name, honouring
 * the brcmf_firmware_path module parameter as an optional directory
 * prefix (a '/' is appended when missing).
 *
 * Returns 0 on success, -ENODEV for an unsupported chip.
 */
static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
{
	char *fw_name;
	char *nvram_name;
	uint fw_len, nv_len;
	char end;

	brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
		  devinfo->ci->chiprev);

	switch (devinfo->ci->chip) {
	case BRCM_CC_43602_CHIP_ID:
		fw_name = BRCMF_PCIE_43602_FW_NAME;
		nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
		break;
	case BRCM_CC_4354_CHIP_ID:
		fw_name = BRCMF_PCIE_4354_FW_NAME;
		nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
		break;
	case BRCM_CC_4356_CHIP_ID:
		fw_name = BRCMF_PCIE_4356_FW_NAME;
		nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
		break;
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
		/* the 43567/43569/43570 family shares one firmware image */
		fw_name = BRCMF_PCIE_43570_FW_NAME;
		nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
		break;
	default:
		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
		return -ENODEV;
	}

	/* reserve one byte for the NUL terminator in each destination */
	fw_len = sizeof(devinfo->fw_name) - 1;
	nv_len = sizeof(devinfo->nvram_name) - 1;
	/* check if firmware path is provided by module parameter */
	if (brcmf_firmware_path[0] != '\0') {
		strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
		strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
		/* track the space remaining for the strncat calls below */
		fw_len -= strlen(devinfo->fw_name);
		nv_len -= strlen(devinfo->nvram_name);

		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
		if (end != '/') {
			strncat(devinfo->fw_name, "/", fw_len);
			strncat(devinfo->nvram_name, "/", nv_len);
			fw_len--;
			nv_len--;
		}
	}
	strncat(devinfo->fw_name, fw_name, fw_len);
	strncat(devinfo->nvram_name, nvram_name, nv_len);

	return 0;
}
/*
 * Download firmware (and optional NVRAM) into device RAM, start the ARM
 * core and wait for the firmware to announce its shared RAM area.
 *
 * The last word of device RAM is zeroed before starting the ARM; the
 * running firmware overwrites it with the shared-area address, which is
 * how we detect it is up. Consumes @fw (released here) and @nvram
 * (freed here when non-NULL).
 *
 * Returns 0 on success or a negative error code.
 */
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	/* generation-2 defaults, used by the doorbell and IRQ paths */
	devinfo->ringbell = brcmf_pcie_ringbell_v2;
	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	/* first firmware word holds the reset vector for the ARM core */
	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* reset last 4 bytes of RAM address. to be used for shared
	 * area. This identifies when FW is running
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		/* NVRAM goes at the very top of device RAM */
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	/* poll in 50 ms steps until firmware rewrites the last RAM word */
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
/*
 * Enable the PCI device and map its register space (BAR0) and TCM
 * memory space (PCI resource index 2). The mappings are torn down by
 * brcmf_pcie_release_resource(), also on this function's error paths
 * via the callers' cleanup.
 *
 * Returns 0 on success or a negative error code.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t  bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
		  devinfo->tcm, (unsigned long long)bar1_addr);

	return 0;
}
  1212. static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
  1213. {
  1214. if (devinfo->tcm)
  1215. iounmap(devinfo->tcm);
  1216. if (devinfo->regs)
  1217. iounmap(devinfo->regs);
  1218. pci_disable_device(devinfo->pdev);
  1219. }
  1220. static int brcmf_pcie_attach_bus(struct device *dev)
  1221. {
  1222. int ret;
  1223. /* Attach to the common driver interface */
  1224. ret = brcmf_attach(dev);
  1225. if (ret) {
  1226. brcmf_err("brcmf_attach failed\n");
  1227. } else {
  1228. ret = brcmf_bus_start(dev);
  1229. if (ret)
  1230. brcmf_err("dongle is not responding\n");
  1231. }
  1232. return ret;
  1233. }
  1234. static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
  1235. {
  1236. u32 ret_addr;
  1237. ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1238. addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1239. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
  1240. return ret_addr;
  1241. }
  1242. static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
  1243. {
  1244. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1245. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1246. return brcmf_pcie_read_reg32(devinfo, addr);
  1247. }
  1248. static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
  1249. {
  1250. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1251. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1252. brcmf_pcie_write_reg32(devinfo, addr, value);
  1253. }
  1254. static int brcmf_pcie_buscoreprep(void *ctx)
  1255. {
  1256. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1257. int err;
  1258. err = brcmf_pcie_get_resource(devinfo);
  1259. if (err == 0) {
  1260. /* Set CC watchdog to reset all the cores on the chip to bring
  1261. * back dongle to a sane state.
  1262. */
  1263. brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
  1264. watchdog), 4);
  1265. msleep(100);
  1266. }
  1267. return err;
  1268. }
  1269. static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
  1270. u32 rstvec)
  1271. {
  1272. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1273. brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
  1274. }
/* Buscore callbacks handed to the common chip layer: prepare maps BARs
 * and watchdog-resets the dongle, exit_dl plants the reset vector, and
 * read32/write32 access backplane registers via the BAR0 window.
 */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.exit_dl = brcmf_pcie_buscore_exitdl,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
  1281. static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
  1282. void *nvram, u32 nvram_len)
  1283. {
  1284. struct brcmf_bus *bus = dev_get_drvdata(dev);
  1285. struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
  1286. struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
  1287. struct brcmf_commonring **flowrings;
  1288. int ret;
  1289. u32 i;
  1290. brcmf_pcie_attach(devinfo);
  1291. ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
  1292. if (ret)
  1293. goto fail;
  1294. devinfo->state = BRCMFMAC_PCIE_STATE_UP;
  1295. ret = brcmf_pcie_init_ringbuffers(devinfo);
  1296. if (ret)
  1297. goto fail;
  1298. ret = brcmf_pcie_init_scratchbuffers(devinfo);
  1299. if (ret)
  1300. goto fail;
  1301. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  1302. ret = brcmf_pcie_request_irq(devinfo);
  1303. if (ret)
  1304. goto fail;
  1305. /* hook the commonrings in the bus structure. */
  1306. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
  1307. bus->msgbuf->commonrings[i] =
  1308. &devinfo->shared.commonrings[i]->commonring;
  1309. flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
  1310. GFP_KERNEL);
  1311. if (!flowrings)
  1312. goto fail;
  1313. for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
  1314. flowrings[i] = &devinfo->shared.flowrings[i].commonring;
  1315. bus->msgbuf->flowrings = flowrings;
  1316. bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
  1317. bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
  1318. bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
  1319. init_waitqueue_head(&devinfo->mbdata_resp_wait);
  1320. brcmf_pcie_intr_enable(devinfo);
  1321. if (brcmf_pcie_attach_bus(bus->dev) == 0)
  1322. return;
  1323. brcmf_pcie_bus_console_read(devinfo);
  1324. fail:
  1325. device_release_driver(dev);
  1326. }
/*
 * PCI probe: allocate the devinfo/bus structures, attach the chip layer
 * and kick off the asynchronous firmware request. Initialization is
 * completed by brcmf_pcie_setup() once the firmware has been fetched.
 *
 * Returns 0 when the firmware request was queued, a negative error
 * otherwise (all partial allocations are unwound).
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	/* also maps the PCI resources via the buscore prepare callback */
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	/* WOWL only when the device can assert PME# from D3hot */
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_pcie_get_fwnames(devinfo);
	if (ret)
		goto fail_bus;

	/* async: brcmf_pcie_setup() runs when the firmware arrives */
	ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
					       BRCMF_FW_REQ_NV_OPTIONAL,
				     devinfo->fw_name, devinfo->nvram_name,
				     brcmf_pcie_setup);
	if (ret == 0)
		return 0;

fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
/*
 * PCI remove: tear down in reverse order of probe/setup. The bus state
 * is set DOWN first so ring callbacks bail out, interrupts are masked,
 * the common layer is detached, then buffers, IRQ and mappings go.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
  1422. #ifdef CONFIG_PM
/*
 * PM suspend: tell the firmware we are entering D3 via the host-to-dev
 * mailbox and wait for its acknowledgement. Without WOWL enabled (or if
 * saving PCI state fails) the device is fully removed instead of being
 * put to sleep; resume will then re-probe it from scratch.
 */
static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);

	bus = dev_get_drvdata(&pdev->dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	/* wait for the D2H mailbox handler to flag the D3 ack */
	wait_event_timeout(devinfo->mbdata_resp_wait,
			   devinfo->mbdata_completed,
			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		return -EIO;
	}
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);

	err = pci_save_state(pdev);
	if (err)
		brcmf_err("pci_save_state failed, err=%d\n", err);
	if ((err) || (!devinfo->wowl_enabled)) {
		/* cannot (or need not) keep the device armed: remove it */
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
		return 0;
	}

	return pci_prepare_to_sleep(pdev);
}
/*
 * PM resume: restore PCI power/state. If the device kept its interrupt
 * mask programmed (hot resume with WOWL), just re-sync with firmware
 * and carry on; otherwise the device lost its state, so tear it down
 * and run probe again.
 */
static int brcmf_pcie_resume(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	bus = dev_get_drvdata(&pdev->dev);
	brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		brcmf_err("pci_set_power_state failed, err=%d\n", err);
		goto cleanup;
	}
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, false);
	pci_enable_wake(pdev, PCI_D3cold, false);

	/* Check if device is still up and running, if so we are ready */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		/* a nonzero INTMASK means the device kept power in D3 */
		if (brcmf_pcie_read_reg32(devinfo,
					  BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
			if (brcmf_pcie_send_mb_data(devinfo,
						    BRCMF_H2D_HOST_D0_INFORM))
				goto cleanup;
			brcmf_dbg(PCIE, "Hot resume, continue....\n");
			brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
			brcmf_bus_change_state(bus, BRCMF_BUS_DATA);
			brcmf_pcie_intr_enable(devinfo);
			return 0;
		}
	}

cleanup:
	/* cold resume: drop everything and probe the device afresh */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
	}
	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		brcmf_err("probe after resume failed, err=%d\n", err);

	return err;
}
  1495. #endif /* CONFIG_PM */
  1496. #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
  1497. PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
  1498. static struct pci_device_id brcmf_pcie_devid_table[] = {
  1499. BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
  1500. BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
  1501. BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
  1502. BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
  1503. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
  1504. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
  1505. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
  1506. { /* end: all zeroes */ }
  1507. };
  1508. MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
  1509. static struct pci_driver brcmf_pciedrvr = {
  1510. .node = {},
  1511. .name = KBUILD_MODNAME,
  1512. .id_table = brcmf_pcie_devid_table,
  1513. .probe = brcmf_pcie_probe,
  1514. .remove = brcmf_pcie_remove,
  1515. #ifdef CONFIG_PM
  1516. .suspend = brcmf_pcie_suspend,
  1517. .resume = brcmf_pcie_resume
  1518. #endif /* CONFIG_PM */
  1519. };
  1520. void brcmf_pcie_register(void)
  1521. {
  1522. int err;
  1523. brcmf_dbg(PCIE, "Enter\n");
  1524. err = pci_register_driver(&brcmf_pciedrvr);
  1525. if (err)
  1526. brcmf_err("PCIE driver registration failed, err=%d\n", err);
  1527. }
/*
 * brcmf_pcie_exit() - unregister the PCIe bus driver.
 *
 * Counterpart of brcmf_pcie_register(); the PCI core invokes
 * brcmf_pcie_remove() for every bound device during unregistration.
 */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}