pcie.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891
  1. /* Copyright (c) 2014 Broadcom Corporation
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/firmware.h>
  18. #include <linux/pci.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/delay.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/bcma/bcma.h>
  23. #include <linux/sched.h>
  24. #include <asm/unaligned.h>
  25. #include <soc.h>
  26. #include <chipcommon.h>
  27. #include <brcmu_utils.h>
  28. #include <brcmu_wifi.h>
  29. #include <brcm_hw_ids.h>
  30. #include "debug.h"
  31. #include "bus.h"
  32. #include "commonring.h"
  33. #include "msgbuf.h"
  34. #include "pcie.h"
  35. #include "firmware.h"
  36. #include "chip.h"
/* Bus state of the PCIe-attached device as tracked by the host driver. */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
/* Firmware and NVRAM image names for the supported chips. */
#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"

#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */

/* Sizes of the BAR mappings: device TCM and register space. */
#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW 0x80
#define BRCMF_PCIE_BAR0_REG_SIZE 0x1000

#define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
/* NOTE(review): "BARO" (letter O) looks like a typo for "BAR0", kept as-is
 * because the name may be referenced elsewhere in the driver.
 */
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000

/* ARM CR4 core registers used while preparing firmware download. */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C

/* PCI config space registers (GENREV1 interrupts, sideband mailbox). */
#define BRCMF_PCIE_REG_INTSTATUS 0x90
#define BRCMF_PCIE_REG_INTMASK 0x94
#define BRCMF_PCIE_REG_SBMBX 0x98

/* PCIe gen-2 core register offsets (accessed through the regs mapping). */
#define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140

/* Generic PCIe core revisions; they use different interrupt schemes. */
#define BRCMF_PCIE_GENREV1 1
#define BRCMF_PCIE_GENREV2 2

#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02

#define BRCMF_PCIE_INT_0 0x01
#define BRCMF_PCIE_INT_1 0x02
#define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
			    BRCMF_PCIE_INT_1)

/* Mailbox interrupt bits: function-0 events and D2H doorbells. */
#define BRCMF_PCIE_MB_INT_FN0_0 0x0100
#define BRCMF_PCIE_MB_INT_FN0_1 0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000

/* All device-to-host doorbell interrupt bits. */
#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
				  BRCMF_PCIE_MB_INT_D2H0_DB1 | \
				  BRCMF_PCIE_MB_INT_D2H1_DB0 | \
				  BRCMF_PCIE_MB_INT_D2H1_DB1 | \
				  BRCMF_PCIE_MB_INT_D2H2_DB0 | \
				  BRCMF_PCIE_MB_INT_D2H2_DB1 | \
				  BRCMF_PCIE_MB_INT_D2H3_DB0 | \
				  BRCMF_PCIE_MB_INT_D2H3_DB1)

/* Supported firmware/host shared-structure versions and flag bits. */
#define BRCMF_PCIE_MIN_SHARED_VERSION 4
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000

/* Byte offsets into the shared structure located in device TCM. */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
#define BRCMF_SHARED_RING_BASE_OFFSET 52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68

/* Byte offsets into the ring info structure and ring mem/state records. */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
#define BRCMF_RING_H2D_RING_MEM_OFFSET 4
#define BRCMF_RING_H2D_RING_STATE_OFFSET 8

#define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
#define BRCMF_RING_MAX_ITEM_OFFSET 4
#define BRCMF_RING_LEN_ITEMS_OFFSET 6
#define BRCMF_RING_MEM_SZ 16
#define BRCMF_RING_STATE_SZ 8

#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52

#define BRCMF_DEF_MAX_RXBUFPOST 255

/* Firmware console descriptor offsets (see brcmf_pcie_bus_console_init). */
#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET 16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024

/* Device-to-host mailbox data bits (see brcmf_pcie_handle_mb_data). */
#define BRCMF_D2H_DEV_D3_ACK 0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004

/* Host-to-device mailbox data bits (see brcmf_pcie_send_mb_data). */
#define BRCMF_H2D_HOST_D3_INFORM 0x00000001
#define BRCMF_H2D_HOST_DS_ACK 0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
#define BRCMF_H2D_HOST_D0_INFORM 0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT 2000 /* msec */

/* PCIe config registers read back and rewritten around a device reset
 * (see brcmf_pcie_reset_device).
 */
#define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
#define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3

MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
/* Host view of the firmware console that lives in device TCM. */
struct brcmf_pcie_console {
	u32 base_addr;		/* console descriptor address in TCM */
	u32 buf_addr;		/* circular log buffer address in TCM */
	u32 bufsize;		/* size of the circular log buffer */
	u32 read_idx;		/* host read position in the buffer */
	u8 log_str[256];	/* line-assembly buffer for host-side logging */
	u8 log_idx;		/* current write position in log_str */
};
/* Host copy of the information area shared with firmware through TCM. */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;	/* TCM address of the shared structure */
	u32 flags;		/* shared flags/version word -- presumably
				 * BRCMF_PCIE_SHARED_*/FLAGS_* bits; parsing
				 * is outside this chunk */
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u32 nrof_flowrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;	/* TCM address of host-to-device mailbox data */
	u32 dtoh_mb_data_addr;	/* TCM address of device-to-host mailbox data */
	u32 ring_info_addr;
	struct brcmf_pcie_console console;
	void *scratch;		/* host buffer handed to the device for DMA */
	dma_addr_t scratch_dmahandle;
	void *ringupd;		/* host buffer for ring update DMA */
	dma_addr_t ringupd_dmahandle;
};
/* Backplane base and wrapper base addresses of a chip core. */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
/* Per-device state for one PCIe-attached Broadcom fullmac device. */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;		/* threaded ISR is currently executing */
	bool irq_requested;	/* request_threaded_irq() succeeded */
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	void __iomem *regs;	/* mapped device register space (BAR0 window) */
	void __iomem *tcm;	/* mapped device internal memory */
	u32 tcm_size;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	u32 generic_corerev;	/* BRCMF_PCIE_GENREV1 or BRCMF_PCIE_GENREV2 */
	struct brcmf_pcie_shared_info shared;
	void (*ringbell)(struct brcmf_pciedev_info *devinfo);
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;	/* set when the D2H D3 ACK arrives */
	bool irq_allocated;
	bool wowl_enabled;
};
/* Host bookkeeping for one message ring (common ring or flow ring). */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;	/* DMA address of the ring buffer */
	u32 w_idx_addr;		/* presumably the TCM address of the write
				 * index (cf. *_W_IDX_PTR_OFFSET) -- confirm */
	u32 r_idx_addr;		/* presumably the TCM address of the read
				 * index -- confirm against ring setup code */
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};
/* Maximum item count for each of the five common message rings,
 * indexed by ring id (H2D control/rxpost, D2H control/tx/rx complete).
 */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};
/* Item size (bytes) for each common message ring, same index order as
 * brcmf_ring_max_item.
 */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
/* dma flushing needs implementation for mips and arm platforms. Should
 * be put in util. Note, this is not real flushing. It is virtual non
 * cached memory. Only write buffers should have to be drained. Though
 * this may be different depending on platform......
 */
/* Both are currently no-ops. */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
  238. static u32
  239. brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
  240. {
  241. void __iomem *address = devinfo->regs + reg_offset;
  242. return (ioread32(address));
  243. }
  244. static void
  245. brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
  246. u32 value)
  247. {
  248. void __iomem *address = devinfo->regs + reg_offset;
  249. iowrite32(value, address);
  250. }
  251. static u8
  252. brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  253. {
  254. void __iomem *address = devinfo->tcm + mem_offset;
  255. return (ioread8(address));
  256. }
  257. static u16
  258. brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  259. {
  260. void __iomem *address = devinfo->tcm + mem_offset;
  261. return (ioread16(address));
  262. }
  263. static void
  264. brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  265. u16 value)
  266. {
  267. void __iomem *address = devinfo->tcm + mem_offset;
  268. iowrite16(value, address);
  269. }
  270. static u32
  271. brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  272. {
  273. void __iomem *address = devinfo->tcm + mem_offset;
  274. return (ioread32(address));
  275. }
  276. static void
  277. brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  278. u32 value)
  279. {
  280. void __iomem *address = devinfo->tcm + mem_offset;
  281. iowrite32(value, address);
  282. }
  283. static u32
  284. brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
  285. {
  286. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  287. return (ioread32(addr));
  288. }
  289. static void
  290. brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  291. u32 value)
  292. {
  293. void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
  294. iowrite32(value, addr);
  295. }
  296. static void
  297. brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  298. void *srcaddr, u32 len)
  299. {
  300. void __iomem *address = devinfo->tcm + mem_offset;
  301. __le32 *src32;
  302. __le16 *src16;
  303. u8 *src8;
  304. if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
  305. if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
  306. src8 = (u8 *)srcaddr;
  307. while (len) {
  308. iowrite8(*src8, address);
  309. address++;
  310. src8++;
  311. len--;
  312. }
  313. } else {
  314. len = len / 2;
  315. src16 = (__le16 *)srcaddr;
  316. while (len) {
  317. iowrite16(le16_to_cpu(*src16), address);
  318. address += 2;
  319. src16++;
  320. len--;
  321. }
  322. }
  323. } else {
  324. len = len / 4;
  325. src32 = (__le32 *)srcaddr;
  326. while (len) {
  327. iowrite32(le32_to_cpu(*src32), address);
  328. address += 4;
  329. src32++;
  330. len--;
  331. }
  332. }
  333. }
/* Write a chipcommon core register; the chipcommon core must currently be
 * selected into the BAR0 window (see brcmf_pcie_select_core()).
 */
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
						CHIPCREGOFFS(reg), value)
  336. static void
  337. brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
  338. {
  339. const struct pci_dev *pdev = devinfo->pdev;
  340. struct brcmf_core *core;
  341. u32 bar0_win;
  342. core = brcmf_chip_get_core(devinfo->ci, coreid);
  343. if (core) {
  344. bar0_win = core->base;
  345. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
  346. if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
  347. &bar0_win) == 0) {
  348. if (bar0_win != core->base) {
  349. bar0_win = core->base;
  350. pci_write_config_dword(pdev,
  351. BRCMF_PCIE_BAR0_WINDOW,
  352. bar0_win);
  353. }
  354. }
  355. } else {
  356. brcmf_err("Unsupported core selected %x\n", coreid);
  357. }
  358. }
/*
 * Reset the device through the chipcommon watchdog while keeping the
 * PCIe link usable: ASPM is disabled around the reset, and afterwards a
 * set of PCIe config registers is read and written back -- presumably to
 * refresh state disturbed by the reset (NOTE(review): confirm intent).
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	/* Config registers to read back and rewrite after the reset. */
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* Clear the ASPM enable bits, remembering the original value. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);

	/* Trigger the watchdog reset and give the chip time to recover. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* Restore the saved link status/control value (re-enables ASPM). */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);

	/* Read each listed config register and write the value back. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
				       cfg_offset[i]);
		val = brcmf_pcie_read_reg32(devinfo,
					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
			  cfg_offset[i], val);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
				       val);
	}
}
/*
 * Early attach-time device preparation: reset the device if it appears
 * to have been left running, rewrite the BAR2 config register and
 * enable wakeup capability on the PCI device.
 */
static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
{
	u32 config;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	/* A non-zero interrupt mask suggests the device was left active;
	 * reset it to reach a known state.
	 */
	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
		brcmf_pcie_reset_device(devinfo);
	/* BAR1 window may not be sized properly */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	/* 0x4e0 == BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG; read-then-rewrite. */
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);

	device_wakeup_enable(&devinfo->pdev->dev);
}
/*
 * Prepare the chip for firmware download. For the 43602 the ARM CR4
 * core's bank PDA register is cleared for bank indexes 5 and 7; other
 * chips need no preparation here. Always returns 0.
 */
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       5);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       7);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
	}
	return 0;
}
/*
 * Finish firmware download and start the firmware. For the 43602 the
 * internal memory core is reset first. Returns 0 when
 * brcmf_chip_set_active() reports success, 1 otherwise.
 */
static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
					  u32 resetintr)
{
	struct brcmf_core *core;

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
		brcmf_chip_resetcore(core, 0, 0, 0);
	}

	return !brcmf_chip_set_active(devinfo->ci, resetintr);
}
/*
 * Post a host-to-device mailbox value and ring the sideband mailbox.
 *
 * If a previous value is still pending it is polled until the firmware
 * consumes it (10 ms steps, up to ~1 second). Returns 0 on success or
 * -EIO when the pending value is never consumed.
 */
static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	addr = shared->htod_mb_data_addr;
	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (cur_htod_mb_data != 0)
		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
			  cur_htod_mb_data);

	/* Wait for the firmware to clear the pending value. */
	i = 0;
	while (cur_htod_mb_data != 0) {
		msleep(10);
		i++;
		if (i > 100)
			return -EIO;
		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
	}

	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
	/* The doorbell is written twice; looks deliberate --
	 * NOTE(review): confirm whether a single write would suffice.
	 */
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	return 0;
}
/*
 * Process a device-to-host mailbox value: deep-sleep enter requests
 * (acked back to the firmware), deep-sleep exit notifications, and the
 * D3 ack that completes the suspend handshake. The mailbox location in
 * TCM is cleared before the individual bits are acted upon.
 */
static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (!dtoh_mb_data)
		return;

	/* Consume the value so the firmware can post the next one. */
	brcmf_pcie_write_tcm32(devinfo, addr, 0);

	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
		/* Wake the suspend path only if somebody is waiting. */
		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
			devinfo->mbdata_completed = true;
			wake_up(&devinfo->mbdata_resp_wait);
		}
	}
}
  493. static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
  494. {
  495. struct brcmf_pcie_shared_info *shared;
  496. struct brcmf_pcie_console *console;
  497. u32 addr;
  498. shared = &devinfo->shared;
  499. console = &shared->console;
  500. addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
  501. console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  502. addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
  503. console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  504. addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
  505. console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
  506. brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
  507. console->base_addr, console->buf_addr, console->bufsize);
  508. }
/*
 * Drain new characters from the firmware console ring buffer and emit
 * them via brcmf_dbg one line at a time. '\r' characters are dropped;
 * over-long lines are force-terminated with a newline before the local
 * assembly buffer overflows.
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	console = &devinfo->shared.console;
	/* Firmware-side write index: how far the device has produced. */
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;	/* wrap the circular buffer */
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* Force a line break when the assembly buffer is nearly
		 * full (leaving room for the terminating NUL).
		 */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}
  541. static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
  542. {
  543. u32 reg_value;
  544. brcmf_dbg(PCIE, "RING !\n");
  545. reg_value = brcmf_pcie_read_reg32(devinfo,
  546. BRCMF_PCIE_PCIE2REG_MAILBOXINT);
  547. reg_value |= BRCMF_PCIE2_INTB;
  548. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
  549. reg_value);
  550. }
/* Ring the host-to-device doorbell, GENREV2 style: write the H2D
 * mailbox register (the value itself is irrelevant).
 */
static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
{
	brcmf_dbg(PCIE, "RING !\n");
	/* Any arbitrary value will do, lets use 1 */
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
}
  557. static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
  558. {
  559. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  560. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  561. 0);
  562. else
  563. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  564. 0);
  565. }
  566. static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
  567. {
  568. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  569. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  570. BRCMF_PCIE_INT_DEF);
  571. else
  572. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  573. BRCMF_PCIE_MB_INT_D2H_DB |
  574. BRCMF_PCIE_MB_INT_FN0_0 |
  575. BRCMF_PCIE_MB_INT_FN0_1);
  576. }
  577. static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
  578. {
  579. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  580. u32 status;
  581. status = 0;
  582. pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
  583. if (status) {
  584. brcmf_pcie_intr_disable(devinfo);
  585. brcmf_dbg(PCIE, "Enter\n");
  586. return IRQ_WAKE_THREAD;
  587. }
  588. return IRQ_NONE;
  589. }
  590. static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
  591. {
  592. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  593. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
  594. brcmf_pcie_intr_disable(devinfo);
  595. brcmf_dbg(PCIE, "Enter\n");
  596. return IRQ_WAKE_THREAD;
  597. }
  598. return IRQ_NONE;
  599. }
/*
 * Threaded IRQ handler for GENREV1 devices. Interrupt status lives in
 * PCI config space; pending status is acked by writing it back
 * (write-to-clear -- NOTE(review): confirm against hardware docs) and,
 * when the bus is up, the msgbuf protocol is triggered to process the
 * rings. Interrupts were disabled by the quick-check handler and are
 * re-enabled here while the bus is up.
 */
static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	const struct pci_dev *pdev = devinfo->pdev;
	u32 status;

	devinfo->in_irq = true;
	status = 0;
	pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
	}
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
/*
 * Threaded IRQ handler for GENREV2 devices. Pending mailbox interrupt
 * bits are acked by writing them back; FN0 bits indicate device-to-host
 * mailbox data, doorbell bits trigger msgbuf ring processing. The
 * firmware console is drained on every invocation, and interrupts are
 * re-enabled while the bus is up.
 */
static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* Ack exactly the bits we observed. */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
  644. static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
  645. {
  646. struct pci_dev *pdev;
  647. pdev = devinfo->pdev;
  648. brcmf_pcie_intr_disable(devinfo);
  649. brcmf_dbg(PCIE, "Enter\n");
  650. /* is it a v1 or v2 implementation */
  651. devinfo->irq_requested = false;
  652. pci_enable_msi(pdev);
  653. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
  654. if (request_threaded_irq(pdev->irq,
  655. brcmf_pcie_quick_check_isr_v1,
  656. brcmf_pcie_isr_thread_v1,
  657. IRQF_SHARED, "brcmf_pcie_intr",
  658. devinfo)) {
  659. pci_disable_msi(pdev);
  660. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  661. return -EIO;
  662. }
  663. } else {
  664. if (request_threaded_irq(pdev->irq,
  665. brcmf_pcie_quick_check_isr_v2,
  666. brcmf_pcie_isr_thread_v2,
  667. IRQF_SHARED, "brcmf_pcie_intr",
  668. devinfo)) {
  669. pci_disable_msi(pdev);
  670. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  671. return -EIO;
  672. }
  673. }
  674. devinfo->irq_requested = true;
  675. devinfo->irq_allocated = true;
  676. return 0;
  677. }
/* Free the device IRQ and drain any handler still running.
 *
 * The threaded handlers set devinfo->in_irq while active; after free_irq()
 * this polls that flag for up to roughly a second before clearing the last
 * pending interrupt status, so the final ack does not race the handler.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;
	brcmf_pcie_intr_disable(devinfo);
	if (!devinfo->irq_requested)
		return;
	devinfo->irq_requested = false;
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);
	msleep(50);
	count = 0;
	/* wait (20 * 50ms max) for an in-flight threaded handler to finish */
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");
	/* acknowledge any still-pending status, per core revision */
	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
		status = 0;
		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
	} else {
		status = brcmf_pcie_read_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
	}
	devinfo->irq_allocated = false;
}
  712. static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
  713. {
  714. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  715. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  716. struct brcmf_commonring *commonring = &ring->commonring;
  717. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  718. return -EIO;
  719. brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  720. commonring->w_ptr, ring->id);
  721. brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
  722. return 0;
  723. }
  724. static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
  725. {
  726. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  727. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  728. struct brcmf_commonring *commonring = &ring->commonring;
  729. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  730. return -EIO;
  731. brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  732. commonring->r_ptr, ring->id);
  733. brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
  734. return 0;
  735. }
  736. static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
  737. {
  738. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  739. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  740. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  741. return -EIO;
  742. devinfo->ringbell(devinfo);
  743. return 0;
  744. }
  745. static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
  746. {
  747. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  748. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  749. struct brcmf_commonring *commonring = &ring->commonring;
  750. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  751. return -EIO;
  752. commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
  753. brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  754. commonring->w_ptr, ring->id);
  755. return 0;
  756. }
  757. static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
  758. {
  759. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  760. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  761. struct brcmf_commonring *commonring = &ring->commonring;
  762. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  763. return -EIO;
  764. commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
  765. brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  766. commonring->r_ptr, ring->id);
  767. return 0;
  768. }
  769. static void *
  770. brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
  771. u32 size, u32 tcm_dma_phys_addr,
  772. dma_addr_t *dma_handle)
  773. {
  774. void *ring;
  775. u64 address;
  776. ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
  777. GFP_KERNEL);
  778. if (!ring)
  779. return NULL;
  780. address = (u64)*dma_handle;
  781. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
  782. address & 0xffffffff);
  783. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
  784. memset(ring, 0, size);
  785. return (ring);
  786. }
  787. static struct brcmf_pcie_ringbuf *
  788. brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
  789. u32 tcm_ring_phys_addr)
  790. {
  791. void *dma_buf;
  792. dma_addr_t dma_handle;
  793. struct brcmf_pcie_ringbuf *ring;
  794. u32 size;
  795. u32 addr;
  796. size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
  797. dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
  798. tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
  799. &dma_handle);
  800. if (!dma_buf)
  801. return NULL;
  802. addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
  803. brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
  804. addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
  805. brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
  806. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  807. if (!ring) {
  808. dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
  809. dma_handle);
  810. return NULL;
  811. }
  812. brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
  813. brcmf_ring_itemsize[ring_id], dma_buf);
  814. ring->dma_handle = dma_handle;
  815. ring->devinfo = devinfo;
  816. brcmf_commonring_register_cb(&ring->commonring,
  817. brcmf_pcie_ring_mb_ring_bell,
  818. brcmf_pcie_ring_mb_update_rptr,
  819. brcmf_pcie_ring_mb_update_wptr,
  820. brcmf_pcie_ring_mb_write_rptr,
  821. brcmf_pcie_ring_mb_write_wptr, ring);
  822. return (ring);
  823. }
  824. static void brcmf_pcie_release_ringbuffer(struct device *dev,
  825. struct brcmf_pcie_ringbuf *ring)
  826. {
  827. void *dma_buf;
  828. u32 size;
  829. if (!ring)
  830. return;
  831. dma_buf = ring->commonring.buf_addr;
  832. if (dma_buf) {
  833. size = ring->commonring.depth * ring->commonring.item_len;
  834. dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
  835. }
  836. kfree(ring);
  837. }
  838. static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
  839. {
  840. u32 i;
  841. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
  842. brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
  843. devinfo->shared.commonrings[i]);
  844. devinfo->shared.commonrings[i] = NULL;
  845. }
  846. kfree(devinfo->shared.flowrings);
  847. devinfo->shared.flowrings = NULL;
  848. }
/* Set up all message rings shared with the dongle.
 *
 * Reads the ring-info block from TCM to learn where the per-ring read/write
 * index words and the ring memory descriptors live, then allocates the H2D
 * and D2H common rings (with DMA buffers) and the flowring bookkeeping
 * array.  Flowrings get their DMA buffers later, on demand; only index
 * addresses and callbacks are set up here.  Returns 0 or -ENOMEM.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u16 max_sub_queues;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);

	/* firmware-provided base addresses of the index-word arrays */
	addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
	d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
	d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
	h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
	h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* host-to-device common rings come first in the descriptor area */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		/* index words are consecutive u32 slots per ring */
		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* then the device-to-host common rings */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += sizeof(u32);
		d2h_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* remaining H2D submission queues become flowrings
	 * NOTE(review): assumes the firmware reports
	 * max_sub_queues >= BRCMF_NROF_H2D_COMMON_MSGRINGS - verify
	 */
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating commonring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
  932. static void
  933. brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  934. {
  935. if (devinfo->shared.scratch)
  936. dma_free_coherent(&devinfo->pdev->dev,
  937. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  938. devinfo->shared.scratch,
  939. devinfo->shared.scratch_dmahandle);
  940. if (devinfo->shared.ringupd)
  941. dma_free_coherent(&devinfo->pdev->dev,
  942. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  943. devinfo->shared.ringupd,
  944. devinfo->shared.ringupd_dmahandle);
  945. }
  946. static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  947. {
  948. u64 address;
  949. u32 addr;
  950. devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
  951. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  952. &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
  953. if (!devinfo->shared.scratch)
  954. goto fail;
  955. memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  956. brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  957. addr = devinfo->shared.tcm_base_address +
  958. BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
  959. address = (u64)devinfo->shared.scratch_dmahandle;
  960. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  961. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  962. addr = devinfo->shared.tcm_base_address +
  963. BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
  964. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
  965. devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
  966. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  967. &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
  968. if (!devinfo->shared.ringupd)
  969. goto fail;
  970. memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  971. brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  972. addr = devinfo->shared.tcm_base_address +
  973. BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
  974. address = (u64)devinfo->shared.ringupd_dmahandle;
  975. brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
  976. brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
  977. addr = devinfo->shared.tcm_base_address +
  978. BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
  979. brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
  980. return 0;
  981. fail:
  982. brcmf_err("Allocating scratch buffers failed\n");
  983. brcmf_pcie_release_scratchbuffers(devinfo);
  984. return -ENOMEM;
  985. }
/* Bus .stop callback: intentionally a no-op for the PCIe bus. */
static void brcmf_pcie_down(struct device *dev)
{
}
/* Bus .txdata callback: stub that reports success without touching the
 * skb.  NOTE(review): presumably data TX flows through the msgbuf
 * protocol rings instead - confirm against the protocol layer.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
/* Bus .txctl callback: stub, always succeeds without sending anything. */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
/* Bus .rxctl callback: stub, always succeeds without receiving anything. */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
  1003. static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
  1004. {
  1005. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1006. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1007. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1008. brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
  1009. devinfo->wowl_enabled = enabled;
  1010. if (enabled)
  1011. device_set_wakeup_enable(&devinfo->pdev->dev, true);
  1012. else
  1013. device_set_wakeup_enable(&devinfo->pdev->dev, false);
  1014. }
/* Bus layer callbacks for the PCIe host interface.  The txdata/txctl/rxctl
 * entries point at the stub implementations defined above.
 */
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
};
/* Parse the shared-RAM info block the firmware placed at @sharedram_addr.
 *
 * Validates the protocol version and rejects legacy TX-push firmware,
 * then caches the mailbox, ring-info and rx-buffer parameters in
 * devinfo->shared and initializes the firmware console.
 * Returns 0, or -EINVAL for unsupported firmware.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	/* first word holds flags, low bits encode the protocol version */
	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}
	if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
		brcmf_err("Unsupported legacy TX mode 0x%x\n",
			  shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
		return -EINVAL;
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	/* 0 means the firmware left it unset; fall back to the default */
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
  1061. static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
  1062. {
  1063. char *fw_name;
  1064. char *nvram_name;
  1065. uint fw_len, nv_len;
  1066. char end;
  1067. brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
  1068. devinfo->ci->chiprev);
  1069. switch (devinfo->ci->chip) {
  1070. case BRCM_CC_43602_CHIP_ID:
  1071. fw_name = BRCMF_PCIE_43602_FW_NAME;
  1072. nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
  1073. break;
  1074. case BRCM_CC_4356_CHIP_ID:
  1075. fw_name = BRCMF_PCIE_4356_FW_NAME;
  1076. nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
  1077. break;
  1078. case BRCM_CC_43567_CHIP_ID:
  1079. case BRCM_CC_43569_CHIP_ID:
  1080. case BRCM_CC_43570_CHIP_ID:
  1081. fw_name = BRCMF_PCIE_43570_FW_NAME;
  1082. nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
  1083. break;
  1084. default:
  1085. brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
  1086. return -ENODEV;
  1087. }
  1088. fw_len = sizeof(devinfo->fw_name) - 1;
  1089. nv_len = sizeof(devinfo->nvram_name) - 1;
  1090. /* check if firmware path is provided by module parameter */
  1091. if (brcmf_firmware_path[0] != '\0') {
  1092. strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
  1093. strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
  1094. fw_len -= strlen(devinfo->fw_name);
  1095. nv_len -= strlen(devinfo->nvram_name);
  1096. end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
  1097. if (end != '/') {
  1098. strncat(devinfo->fw_name, "/", fw_len);
  1099. strncat(devinfo->nvram_name, "/", nv_len);
  1100. fw_len--;
  1101. nv_len--;
  1102. }
  1103. }
  1104. strncat(devinfo->fw_name, fw_name, fw_len);
  1105. strncat(devinfo->nvram_name, nvram_name, nv_len);
  1106. return 0;
  1107. }
/* Download firmware (and optional NVRAM) into device RAM and wait for the
 * firmware to come up.
 *
 * The last RAM word is zeroed before release from reset; the running
 * firmware overwrites it with the shared-RAM info address, which is how
 * this function detects a successful boot (polled for up to
 * BRCMF_PCIE_FW_UP_TIMEOUT ms).  Consumes @fw and @nvram.
 * Returns 0, a download-state error, or -ENODEV on boot timeout.
 */
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	/* NOTE(review): doorbell/corerev set unconditionally to the v2
	 * variants here - presumably adjusted elsewhere for rev1 parts;
	 * verify against brcmf_pcie_attach()
	 */
	devinfo->ringbell = brcmf_pcie_ringbell_v2;
	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	/* first firmware word is the ARM reset vector */
	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* reset last 4 bytes of RAM address. to be used for shared
	 * area. This identifies when FW is running
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		/* NVRAM occupies the top of RAM */
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	/* firmware signals readiness by changing the last RAM word */
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
/* Enable the PCI device and map its register (BAR0) and TCM (BAR2)
 * windows.  Returns 0 or a negative error.
 *
 * NOTE(review): on the error paths the device is left enabled and any
 * successful ioremap is not undone here - presumably the caller unwinds
 * via brcmf_pcie_release_resource(); verify.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t  bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
		  devinfo->tcm, (unsigned long long)bar1_addr);

	return 0;
}
  1205. static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
  1206. {
  1207. if (devinfo->tcm)
  1208. iounmap(devinfo->tcm);
  1209. if (devinfo->regs)
  1210. iounmap(devinfo->regs);
  1211. pci_disable_device(devinfo->pdev);
  1212. }
  1213. static int brcmf_pcie_attach_bus(struct device *dev)
  1214. {
  1215. int ret;
  1216. /* Attach to the common driver interface */
  1217. ret = brcmf_attach(dev);
  1218. if (ret) {
  1219. brcmf_err("brcmf_attach failed\n");
  1220. } else {
  1221. ret = brcmf_bus_start(dev);
  1222. if (ret)
  1223. brcmf_err("dongle is not responding\n");
  1224. }
  1225. return ret;
  1226. }
  1227. static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
  1228. {
  1229. u32 ret_addr;
  1230. ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1231. addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1232. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
  1233. return ret_addr;
  1234. }
  1235. static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
  1236. {
  1237. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1238. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1239. return brcmf_pcie_read_reg32(devinfo, addr);
  1240. }
  1241. static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
  1242. {
  1243. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1244. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1245. brcmf_pcie_write_reg32(devinfo, addr, value);
  1246. }
  1247. static int brcmf_pcie_buscoreprep(void *ctx)
  1248. {
  1249. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1250. int err;
  1251. err = brcmf_pcie_get_resource(devinfo);
  1252. if (err == 0) {
  1253. /* Set CC watchdog to reset all the cores on the chip to bring
  1254. * back dongle to a sane state.
  1255. */
  1256. brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
  1257. watchdog), 4);
  1258. msleep(100);
  1259. }
  1260. return err;
  1261. }
  1262. static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
  1263. u32 rstvec)
  1264. {
  1265. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1266. brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
  1267. }
/* Chip-layer callbacks backed by the sliding BAR0 register window. */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
  1274. static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
  1275. void *nvram, u32 nvram_len)
  1276. {
  1277. struct brcmf_bus *bus = dev_get_drvdata(dev);
  1278. struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
  1279. struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
  1280. struct brcmf_commonring **flowrings;
  1281. int ret;
  1282. u32 i;
  1283. brcmf_pcie_attach(devinfo);
  1284. ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
  1285. if (ret)
  1286. goto fail;
  1287. devinfo->state = BRCMFMAC_PCIE_STATE_UP;
  1288. ret = brcmf_pcie_init_ringbuffers(devinfo);
  1289. if (ret)
  1290. goto fail;
  1291. ret = brcmf_pcie_init_scratchbuffers(devinfo);
  1292. if (ret)
  1293. goto fail;
  1294. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  1295. ret = brcmf_pcie_request_irq(devinfo);
  1296. if (ret)
  1297. goto fail;
  1298. /* hook the commonrings in the bus structure. */
  1299. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
  1300. bus->msgbuf->commonrings[i] =
  1301. &devinfo->shared.commonrings[i]->commonring;
  1302. flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
  1303. GFP_KERNEL);
  1304. if (!flowrings)
  1305. goto fail;
  1306. for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
  1307. flowrings[i] = &devinfo->shared.flowrings[i].commonring;
  1308. bus->msgbuf->flowrings = flowrings;
  1309. bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
  1310. bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
  1311. bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
  1312. init_waitqueue_head(&devinfo->mbdata_resp_wait);
  1313. brcmf_pcie_intr_enable(devinfo);
  1314. if (brcmf_pcie_attach_bus(bus->dev) == 0)
  1315. return;
  1316. brcmf_pcie_bus_console_read(devinfo);
  1317. fail:
  1318. device_release_driver(dev);
  1319. }
/* PCI probe callback: allocate the per-device state, attach the chip
 * (which claims PCI resources via the buscore ops), build the bus
 * structures, resolve firmware names and kick off the asynchronous
 * firmware request that completes in brcmf_pcie_setup().
 * Returns 0 or a negative error with everything unwound.
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		/* NULL out so the fail path skips chip detach */
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	/* WOWL only advertised if the device can signal PME from D3hot */
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_pcie_get_fwnames(devinfo);
	if (ret)
		goto fail_bus;

	/* asynchronous: completion continues in brcmf_pcie_setup() */
	ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
					       BRCMF_FW_REQ_NV_OPTIONAL,
				     devinfo->fw_name, devinfo->nvram_name,
				     brcmf_pcie_setup);
	if (ret == 0)
		return 0;

fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
/* PCI remove callback: detach the common driver, then tear down IRQ,
 * buffers, chip and PCI resources in the reverse order of setup.
 * Several steps are guarded on devinfo->ci because suspend may clear it
 * before calling here.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	/* mark down first so the ring callbacks and ISR threads bail out */
	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
  1415. #ifdef CONFIG_PM
/* PM suspend: inform the firmware of the D3 transition via mailbox and
 * wait for its acknowledgement.  If WOWL is disabled (or PCI state cannot
 * be saved) the device is torn down completely here and re-probed on
 * resume; otherwise it is armed for wake and put to sleep.
 */
static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);

	bus = dev_get_drvdata(&pdev->dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
	/* mbdata_completed is set by the mailbox handler on the D3 ack */
	wait_event_timeout(devinfo->mbdata_resp_wait,
			   devinfo->mbdata_completed,
			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		return -EIO;
	}
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);

	err = pci_save_state(pdev);
	if (err)
		brcmf_err("pci_save_state failed, err=%d\n", err);
	if ((err) || (!devinfo->wowl_enabled)) {
		/* clear ci first so remove() skips the ci-guarded steps */
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
		return 0;
	}

	return pci_prepare_to_sleep(pdev);
}
/* PM resume: restore PCI state and check whether the firmware survived
 * suspend (non-zero interrupt mask).  If so, resume in place; otherwise
 * tear the device down and run a full probe from scratch.
 */
static int brcmf_pcie_resume(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	bus = dev_get_drvdata(&pdev->dev);
	brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		brcmf_err("pci_set_power_state failed, err=%d\n", err);
		goto cleanup;
	}
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, false);
	pci_enable_wake(pdev, PCI_D3cold, false);

	/* Check if device is still up and running, if so we are ready */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		if (brcmf_pcie_read_reg32(devinfo,
					  BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
			if (brcmf_pcie_send_mb_data(devinfo,
						    BRCMF_H2D_HOST_D0_INFORM))
				goto cleanup;
			brcmf_dbg(PCIE, "Hot resume, continue....\n");
			brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
			brcmf_bus_change_state(bus, BRCMF_BUS_UP);
			brcmf_pcie_intr_enable(devinfo);
			return 0;
		}
	}

cleanup:
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		/* clear ci first so remove() skips the ci-guarded steps */
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
	}
	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		brcmf_err("probe after resume failed, err=%d\n", err);

	return err;
}
  1488. #endif /* CONFIG_PM */
/* PCI ids served by this driver: Broadcom vendor, network-other class. */
#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }

static struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	{ /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
/* PCI driver descriptor; legacy suspend/resume hooks when PM is built. */
static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.suspend = brcmf_pcie_suspend,
	.resume = brcmf_pcie_resume
#endif /* CONFIG_PM */
};
  1512. void brcmf_pcie_register(void)
  1513. {
  1514. int err;
  1515. brcmf_dbg(PCIE, "Enter\n");
  1516. err = pci_register_driver(&brcmf_pciedrvr);
  1517. if (err)
  1518. brcmf_err("PCIE driver registration failed, err=%d\n", err);
  1519. }
/* Unregister the PCIe bus driver (module exit path). */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}