pcie-iproc.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313
  1. /*
  2. * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
  3. * Copyright (C) 2015 Broadcom Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation version 2.
  8. *
  9. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  10. * kind, whether express or implied; without even the implied warranty
  11. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/pci.h>
  16. #include <linux/msi.h>
  17. #include <linux/clk.h>
  18. #include <linux/module.h>
  19. #include <linux/mbus.h>
  20. #include <linux/slab.h>
  21. #include <linux/delay.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/irqchip/arm-gic-v3.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/of_address.h>
  26. #include <linux/of_pci.h>
  27. #include <linux/of_irq.h>
  28. #include <linux/of_platform.h>
  29. #include <linux/phy/phy.h>
  30. #include "pcie-iproc.h"
/* Bits in the IPROC_PCIE_CLK_CTRL register controlling EP reset behavior */
#define EP_PERST_SOURCE_SELECT_SHIFT 2
#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
#define EP_MODE_SURVIVE_PERST_SHIFT 1
#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
#define RC_PCIE_RST_OUTPUT_SHIFT 0
#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK 0x7f

/* Steer MSI writes to an external controller (e.g. GICv3 ITS) */
#define GIC_V3_CFG_SHIFT 0
#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)

#define MSI_ENABLE_CFG_SHIFT 0
#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)

/* Indirect access to root-complex config space (IND_ADDR/IND_DATA pair) */
#define CFG_IND_ADDR_MASK 0x00001ffc

/* Fields of the CFG_ADDR register used for EP config-space accesses */
#define CFG_ADDR_BUS_NUM_SHIFT 20
#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
#define CFG_ADDR_DEV_NUM_SHIFT 15
#define CFG_ADDR_DEV_NUM_MASK 0x000f8000
#define CFG_ADDR_FUNC_NUM_SHIFT 12
#define CFG_ADDR_FUNC_NUM_MASK 0x00007000
#define CFG_ADDR_REG_NUM_SHIFT 2
#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
#define CFG_ADDR_CFG_TYPE_SHIFT 0
#define CFG_ADDR_CFG_TYPE_MASK 0x00000003

/* Enable all four INTx lines at the root complex */
#define SYS_RC_INTX_MASK 0xf

/* Link-status register bits */
#define PCIE_PHYLINKUP_SHIFT 3
#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
#define PCIE_DL_ACTIVE_SHIFT 2
#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)

/* Forward APB bus errors for unsupported config requests */
#define APB_ERR_EN_SHIFT 0
#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)

/* derive the enum index of the outbound/inbound mapping registers */
#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)

/*
 * Maximum number of outbound mapping window sizes that can be supported by any
 * OARR/OMAP mapping pair
 */
#define MAX_NUM_OB_WINDOW_SIZES 4

#define OARR_VALID_SHIFT 0
#define OARR_VALID BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT 1

/*
 * Maximum number of inbound mapping region sizes that can be supported by an
 * IARR
 */
#define MAX_NUM_IB_REGION_SIZES 9

#define IMAP_VALID_SHIFT 0
#define IMAP_VALID BIT(IMAP_VALID_SHIFT)

/* fixed offset of the PCIe capability structure in RC config space */
#define PCI_EXP_CAP 0xac

/* sentinel offset marking a register absent on a controller variant */
#define IPROC_PCIE_REG_INVALID 0xffff
/**
 * iProc PCIe outbound mapping controller specific parameters
 *
 * @window_sizes: list of supported outbound mapping window sizes in MB
 * @nr_sizes: number of supported outbound mapping window sizes
 */
struct iproc_pcie_ob_map {
	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
	unsigned int nr_sizes;
};
/* PAXB v1: two OARR/OMAP pairs, each supporting 128 MB or 256 MB windows */
static const struct iproc_pcie_ob_map paxb_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
};
/* PAXB v2: four OARR/OMAP pairs; pairs 2/3 also support 512 MB and 1 GB */
static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR2/OMAP2 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
	{
		/* OARR3/OMAP3 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
};
/**
 * iProc PCIe inbound mapping type
 */
enum iproc_pcie_ib_map_type {
	/* for DDR memory */
	IPROC_PCIE_IB_MAP_MEM = 0,
	/* for device I/O memory */
	IPROC_PCIE_IB_MAP_IO,
	/* invalid or unused */
	IPROC_PCIE_IB_MAP_INVALID
};
/**
 * iProc PCIe inbound mapping controller specific parameters
 *
 * @type: inbound mapping region type
 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 * SZ_1G
 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 * GB, depending on the size unit
 * @nr_sizes: number of supported inbound mapping region sizes
 * @nr_windows: number of supported inbound mapping windows for the region
 * @imap_addr_offset: register offset between the upper and lower 32-bit
 * IMAP address registers
 * @imap_window_offset: register offset between each IMAP window
 */
struct iproc_pcie_ib_map {
	enum iproc_pcie_ib_map_type type;
	unsigned int size_unit;
	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
	unsigned int nr_sizes;
	unsigned int nr_windows;
	u16 imap_addr_offset;
	u16 imap_window_offset;
};
/* PAXB v2 inbound regions; index matches the IARRn/IMAPn register pair */
static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
	{
		/* IARR0/IMAP0 */
		.type = IPROC_PCIE_IB_MAP_IO,
		.size_unit = SZ_1K,
		.region_sizes = { 32 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x40,
		.imap_window_offset = 0x4,
	},
	{
		/* IARR1/IMAP1 (currently unused) */
		.type = IPROC_PCIE_IB_MAP_INVALID,
	},
	{
		/* IARR2/IMAP2 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
				  16384 },
		.nr_sizes = 9,
		.nr_windows = 1,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR3/IMAP3 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 1, 2, 4, 8, 16, 32 },
		.nr_sizes = 6,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR4/IMAP4 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 32, 64, 128, 256, 512 },
		.nr_sizes = 5,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
};
/*
 * iProc PCIe host registers
 *
 * Logical register indices; each controller variant maps a subset of these
 * to real offsets in its iproc_pcie_reg_* table, leaving the rest invalid.
 */
enum iproc_pcie_reg {
	/* clock/reset signal control */
	IPROC_PCIE_CLK_CTRL = 0,

	/*
	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
	 * GICv3 ITS)
	 */
	IPROC_PCIE_MSI_GIC_MODE,

	/*
	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
	 * window where the MSI posted writes are written, for the writes to be
	 * interpreted as MSI writes.
	 */
	IPROC_PCIE_MSI_BASE_ADDR,
	IPROC_PCIE_MSI_WINDOW_SIZE,

	/*
	 * To hold the address of the register where the MSI writes are
	 * programed. When ARM GICv3 ITS is used, this should be programmed
	 * with the address of the GITS_TRANSLATER register.
	 */
	IPROC_PCIE_MSI_ADDR_LO,
	IPROC_PCIE_MSI_ADDR_HI,

	/* enable MSI */
	IPROC_PCIE_MSI_EN_CFG,

	/* allow access to root complex configuration space */
	IPROC_PCIE_CFG_IND_ADDR,
	IPROC_PCIE_CFG_IND_DATA,

	/* allow access to device configuration space */
	IPROC_PCIE_CFG_ADDR,
	IPROC_PCIE_CFG_DATA,

	/* enable INTx */
	IPROC_PCIE_INTX_EN,

	/* outbound address mapping; OARR/OMAP pairs must stay adjacent so
	 * MAP_REG() can index them */
	IPROC_PCIE_OARR0,
	IPROC_PCIE_OMAP0,
	IPROC_PCIE_OARR1,
	IPROC_PCIE_OMAP1,
	IPROC_PCIE_OARR2,
	IPROC_PCIE_OMAP2,
	IPROC_PCIE_OARR3,
	IPROC_PCIE_OMAP3,

	/* inbound address mapping; IARR/IMAP pairs likewise indexed by
	 * MAP_REG() */
	IPROC_PCIE_IARR0,
	IPROC_PCIE_IMAP0,
	IPROC_PCIE_IARR1,
	IPROC_PCIE_IMAP1,
	IPROC_PCIE_IARR2,
	IPROC_PCIE_IMAP2,
	IPROC_PCIE_IARR3,
	IPROC_PCIE_IMAP3,
	IPROC_PCIE_IARR4,
	IPROC_PCIE_IMAP4,

	/* link status */
	IPROC_PCIE_LINK_STATUS,

	/* enable APB error for unsupported requests */
	IPROC_PCIE_APB_ERR_EN,

	/* total number of core registers */
	IPROC_PCIE_MAX_NUM_REG,
};
/* iProc PCIe PAXB BCMA registers; entries not listed default to 0 and are
 * remapped to IPROC_PCIE_REG_INVALID at probe time (not shown here) */
static const u16 iproc_pcie_reg_paxb_bcma[] = {
	[IPROC_PCIE_CLK_CTRL] = 0x000,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
	[IPROC_PCIE_CFG_IND_DATA] = 0x124,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
	[IPROC_PCIE_INTX_EN] = 0x330,
	[IPROC_PCIE_LINK_STATUS] = 0xf0c,
};
/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
	[IPROC_PCIE_CLK_CTRL] = 0x000,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
	[IPROC_PCIE_CFG_IND_DATA] = 0x124,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
	[IPROC_PCIE_INTX_EN] = 0x330,
	[IPROC_PCIE_OARR0] = 0xd20,
	[IPROC_PCIE_OMAP0] = 0xd40,
	[IPROC_PCIE_OARR1] = 0xd28,
	[IPROC_PCIE_OMAP1] = 0xd48,
	[IPROC_PCIE_LINK_STATUS] = 0xf0c,
	[IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXB v2 registers; adds OARR2/3, and the inbound IARR/IMAP set
 * (note IARR1/IMAP1 is intentionally absent — see paxb_v2_ib_map) */
static const u16 iproc_pcie_reg_paxb_v2[] = {
	[IPROC_PCIE_CLK_CTRL] = 0x000,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
	[IPROC_PCIE_CFG_IND_DATA] = 0x124,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
	[IPROC_PCIE_INTX_EN] = 0x330,
	[IPROC_PCIE_OARR0] = 0xd20,
	[IPROC_PCIE_OMAP0] = 0xd40,
	[IPROC_PCIE_OARR1] = 0xd28,
	[IPROC_PCIE_OMAP1] = 0xd48,
	[IPROC_PCIE_OARR2] = 0xd60,
	[IPROC_PCIE_OMAP2] = 0xd68,
	[IPROC_PCIE_OARR3] = 0xdf0,
	[IPROC_PCIE_OMAP3] = 0xdf8,
	[IPROC_PCIE_IARR0] = 0xd00,
	[IPROC_PCIE_IMAP0] = 0xc00,
	[IPROC_PCIE_IARR2] = 0xd10,
	[IPROC_PCIE_IMAP2] = 0xcc0,
	[IPROC_PCIE_IARR3] = 0xe00,
	[IPROC_PCIE_IMAP3] = 0xe08,
	[IPROC_PCIE_IARR4] = 0xe68,
	[IPROC_PCIE_IMAP4] = 0xe70,
	[IPROC_PCIE_LINK_STATUS] = 0xf0c,
	[IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXC v1 registers; config-space access only, no link/mapping
 * registers (PAXC fronts an internally emulated endpoint) */
static const u16 iproc_pcie_reg_paxc[] = {
	[IPROC_PCIE_CLK_CTRL] = 0x000,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
/* iProc PCIe PAXC v2 registers; adds MSI steering to an external controller */
static const u16 iproc_pcie_reg_paxc_v2[] = {
	[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
	[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
	[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
	[IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
	[IPROC_PCIE_MSI_ADDR_HI] = 0x080,
	[IPROC_PCIE_MSI_EN_CFG] = 0x09c,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
/*
 * Retrieve the driver state from a pci_bus.  On ARM the bus sysdata is a
 * pci_sys_data whose private_data holds our state; elsewhere sysdata points
 * at the iproc_pcie directly.
 */
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
	struct iproc_pcie *pcie;
#ifdef CONFIG_ARM
	struct pci_sys_data *sys = bus->sysdata;

	pcie = sys->private_data;
#else
	pcie = bus->sysdata;
#endif
	return pcie;
}
  350. static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
  351. {
  352. return !!(reg_offset == IPROC_PCIE_REG_INVALID);
  353. }
/*
 * Look up the hardware offset of a logical register for this controller
 * variant; may return IPROC_PCIE_REG_INVALID for unsupported registers.
 */
static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg)
{
	return pcie->reg_offsets[reg];
}
/*
 * Read a core register, returning 0 for registers the current controller
 * variant does not implement.
 */
static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
				      enum iproc_pcie_reg reg)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return 0;

	return readl(pcie->base + offset);
}
/*
 * Write a core register; silently ignored for registers the current
 * controller variant does not implement.
 */
static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg, u32 val)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return;

	writel(val, pcie->base + offset);
}
  375. /**
  376. * APB error forwarding can be disabled during access of configuration
  377. * registers of the endpoint device, to prevent unsupported requests
  378. * (typically seen during enumeration with multi-function devices) from
  379. * triggering a system exception.
  380. */
  381. static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
  382. bool disable)
  383. {
  384. struct iproc_pcie *pcie = iproc_data(bus);
  385. u32 val;
  386. if (bus->number && pcie->has_apb_err_disable) {
  387. val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
  388. if (disable)
  389. val &= ~APB_ERR_EN;
  390. else
  391. val |= APB_ERR_EN;
  392. iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
  393. }
  394. }
/**
 * Note access to the configuration registers are protected at the higher layer
 * by 'pci_lock' in drivers/pci/access.c
 *
 * Returns a mapped address of the config data register for the requested
 * (bus, devfn, where), after programming the matching address register, or
 * NULL when the target is unreachable.
 */
static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
					    unsigned int devfn,
					    int where)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned slot = PCI_SLOT(devfn);
	unsigned fn = PCI_FUNC(devfn);
	unsigned busno = bus->number;
	u32 val;
	u16 offset;

	/* root complex access */
	if (busno == 0) {
		/* only the root port itself (slot 0, function 0) is valid */
		if (slot > 0 || fn > 0)
			return NULL;

		/* indirect access: latch the register offset, read via DATA */
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
				     where & CFG_IND_ADDR_MASK);
		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
		if (iproc_pcie_reg_is_invalid(offset))
			return NULL;
		else
			return (pcie->base + offset);
	}

	/*
	 * PAXC is connected to an internally emulated EP within the SoC. It
	 * allows only one device.
	 */
	if (pcie->ep_is_internal)
		if (slot > 0)
			return NULL;

	/* EP device access: encode bus/dev/fn/reg plus type-1 indicator */
	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
		(fn << CFG_ADDR_FUNC_NUM_SHIFT) |
		(where & CFG_ADDR_REG_NUM_MASK) |
		(1 & CFG_ADDR_CFG_TYPE_MASK);
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
	if (iproc_pcie_reg_is_invalid(offset))
		return NULL;
	else
		return (pcie->base + offset);
}
  441. static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
  442. int where, int size, u32 *val)
  443. {
  444. int ret;
  445. iproc_pcie_apb_err_disable(bus, true);
  446. ret = pci_generic_config_read32(bus, devfn, where, size, val);
  447. iproc_pcie_apb_err_disable(bus, false);
  448. return ret;
  449. }
  450. static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
  451. int where, int size, u32 val)
  452. {
  453. int ret;
  454. iproc_pcie_apb_err_disable(bus, true);
  455. ret = pci_generic_config_write32(bus, devfn, where, size, val);
  456. iproc_pcie_apb_err_disable(bus, false);
  457. return ret;
  458. }
/* config-space accessors handed to the PCI core */
static struct pci_ops iproc_pcie_ops = {
	.map_bus = iproc_pcie_map_cfg_bus,
	.read = iproc_pcie_config_read32,
	.write = iproc_pcie_config_write32,
};
/*
 * Pulse PERST to the downstream device.  The delays bracket the reset pulse:
 * 250 us in reset, then 100 ms for the device to come back up.
 */
static void iproc_pcie_reset(struct iproc_pcie *pcie)
{
	u32 val;

	/*
	 * PAXC and the internal emulated endpoint device downstream should not
	 * be reset. If firmware has been loaded on the endpoint device at an
	 * earlier boot stage, reset here causes issues.
	 */
	if (pcie->ep_is_internal)
		return;

	/*
	 * Select perst_b signal as reset source. Put the device into reset,
	 * and then bring it out of reset
	 */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
	val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
		~RC_PCIE_RST_OUTPUT;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
	udelay(250);

	val |= RC_PCIE_RST_OUTPUT;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
	msleep(100);
}
/*
 * Verify the controller is in RC mode and the link came up, forcing the
 * bridge class code and retrying at Gen1 speed if the link is down at Gen2.
 * Returns 0 when the link is active, negative errno otherwise.
 */
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
	struct device *dev = pcie->dev;
	u8 hdr_type;
	u32 link_ctrl, class, val;
	u16 pos = PCI_EXP_CAP, link_status;
	bool link_is_active = false;

	/*
	 * PAXC connects to emulated endpoint devices directly and does not
	 * have a Serdes. Therefore skip the link detection logic here.
	 */
	if (pcie->ep_is_internal)
		return 0;

	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
		dev_err(dev, "PHY or data link is INACTIVE!\n");
		return -ENODEV;
	}

	/* make sure we are not in EP mode */
	pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
		return -EFAULT;
	}

	/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
#define PCI_CLASS_BRIDGE_MASK 0xffff00
#define PCI_CLASS_BRIDGE_SHIFT 8
	pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
	class &= ~PCI_CLASS_BRIDGE_MASK;
	class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
	pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);

	/* check link status to see if link is active */
	pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
	if (link_status & PCI_EXP_LNKSTA_NLW)
		link_is_active = true;

	if (!link_is_active) {
		/* try GEN 1 link speed */
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
		pci_bus_read_config_dword(bus, 0,
					  pos + PCI_EXP_LNKCTL2,
					  &link_ctrl);
		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
		    PCI_TARGET_LINK_SPEED_GEN2) {
			/* downgrade target speed to Gen1 and re-check */
			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
			pci_bus_write_config_dword(bus, 0,
						   pos + PCI_EXP_LNKCTL2,
						   link_ctrl);
			msleep(100);

			pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
						 &link_status);
			if (link_status & PCI_EXP_LNKSTA_NLW)
				link_is_active = true;
		}
	}

	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");

	return link_is_active ? 0 : -ENODEV;
}
/* enable all four INTx interrupt lines at the root complex */
static void iproc_pcie_enable(struct iproc_pcie *pcie)
{
	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}
/* true when the OARR at @window_idx already holds an enabled mapping */
static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
					  int window_idx)
{
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));

	return !!(val & OARR_VALID);
}
  559. static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
  560. int size_idx, u64 axi_addr, u64 pci_addr)
  561. {
  562. struct device *dev = pcie->dev;
  563. u16 oarr_offset, omap_offset;
  564. /*
  565. * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
  566. * on window index.
  567. */
  568. oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
  569. window_idx));
  570. omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
  571. window_idx));
  572. if (iproc_pcie_reg_is_invalid(oarr_offset) ||
  573. iproc_pcie_reg_is_invalid(omap_offset))
  574. return -EINVAL;
  575. /*
  576. * Program the OARR registers. The upper 32-bit OARR register is
  577. * always right after the lower 32-bit OARR register.
  578. */
  579. writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
  580. OARR_VALID, pcie->base + oarr_offset);
  581. writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
  582. /* now program the OMAP registers */
  583. writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
  584. writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
  585. dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
  586. window_idx, oarr_offset, &axi_addr, &pci_addr);
  587. dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
  588. readl(pcie->base + oarr_offset),
  589. readl(pcie->base + oarr_offset + 4));
  590. dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
  591. readl(pcie->base + omap_offset),
  592. readl(pcie->base + omap_offset + 4));
  593. return 0;
  594. }
/**
 * Some iProc SoCs require the SW to configure the outbound address mapping
 *
 * Outbound address translation:
 *
 * iproc_pcie_address = axi_address - axi_offset
 * OARR = iproc_pcie_address
 * OMAP = pci_addr
 *
 * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 *
 * Returns 0 once the whole @size has been covered by one or more windows,
 * negative errno otherwise.
 */
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	struct device *dev = pcie->dev;
	/* ret stays -EINVAL if no window/size combination ever matches */
	int ret = -EINVAL, window_idx, size_idx;

	if (axi_addr < ob->axi_offset) {
		dev_err(dev, "axi address %pap less than offset %pap\n",
			&axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	/* iterate through all OARR/OMAP mapping windows */
	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
		const struct iproc_pcie_ob_map *ob_map =
			&pcie->ob_map[window_idx];

		/*
		 * If current outbound window is already in use, move on to the
		 * next one.
		 */
		if (iproc_pcie_ob_is_valid(pcie, window_idx))
			continue;

		/*
		 * Iterate through all supported window sizes within the
		 * OARR/OMAP pair to find a match. Go through the window sizes
		 * in a descending order.
		 */
		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
		     size_idx--) {
			resource_size_t window_size =
				ob_map->window_sizes[size_idx] * SZ_1M;

			if (size < window_size)
				continue;

			if (!IS_ALIGNED(axi_addr, window_size) ||
			    !IS_ALIGNED(pci_addr, window_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/*
			 * Match found! Program both OARR and OMAP and mark
			 * them as a valid entry.
			 */
			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
						  axi_addr, pci_addr);
			if (ret)
				goto err_ob;

			size -= window_size;
			if (size == 0)
				return 0;

			/*
			 * If we are here, we are done with the current window,
			 * but not yet finished all mappings. Need to move on
			 * to the next window.
			 */
			axi_addr += window_size;
			pci_addr += window_size;
			break;
		}
	}

	/* deliberate fall-through: loop exhaustion means no window matched */
err_ob:
	dev_err(dev, "unable to configure outbound mapping\n");
	dev_err(dev,
		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
		&axi_addr, &ob->axi_offset, &pci_addr, &size);

	return ret;
}
/*
 * Walk the host bridge resource list and program an outbound mapping for
 * each memory window; I/O and bus resources need no mapping here.
 */
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
				 struct list_head *resources)
{
	struct device *dev = pcie->dev;
	struct resource_entry *window;
	int ret;

	resource_list_for_each_entry(window, resources) {
		struct resource *res = window->res;
		u64 res_type = resource_type(res);

		switch (res_type) {
		case IORESOURCE_IO:
		case IORESOURCE_BUS:
			break;
		case IORESOURCE_MEM:
			/* PCI address = CPU address minus the window offset */
			ret = iproc_pcie_setup_ob(pcie, res->start,
						  res->start - window->offset,
						  resource_size(res));
			if (ret)
				return ret;
			break;
		default:
			dev_err(dev, "invalid resource %pR\n", res);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * An inbound region is in use when any of its size-select bits (the low
 * nr_sizes bits of the IARR register) is set.
 */
static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
					   int region_idx)
{
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));

	return !!(val & (BIT(ib_map->nr_sizes) - 1));
}
  713. static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
  714. enum iproc_pcie_ib_map_type type)
  715. {
  716. return !!(ib_map->type == type);
  717. }
  718. static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
  719. int size_idx, int nr_windows, u64 axi_addr,
  720. u64 pci_addr, resource_size_t size)
  721. {
  722. struct device *dev = pcie->dev;
  723. const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
  724. u16 iarr_offset, imap_offset;
  725. u32 val;
  726. int window_idx;
  727. iarr_offset = iproc_pcie_reg_offset(pcie,
  728. MAP_REG(IPROC_PCIE_IARR0, region_idx));
  729. imap_offset = iproc_pcie_reg_offset(pcie,
  730. MAP_REG(IPROC_PCIE_IMAP0, region_idx));
  731. if (iproc_pcie_reg_is_invalid(iarr_offset) ||
  732. iproc_pcie_reg_is_invalid(imap_offset))
  733. return -EINVAL;
  734. dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
  735. region_idx, iarr_offset, &axi_addr, &pci_addr);
  736. /*
  737. * Program the IARR registers. The upper 32-bit IARR register is
  738. * always right after the lower 32-bit IARR register.
  739. */
  740. writel(lower_32_bits(pci_addr) | BIT(size_idx),
  741. pcie->base + iarr_offset);
  742. writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
  743. dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
  744. readl(pcie->base + iarr_offset),
  745. readl(pcie->base + iarr_offset + 4));
  746. /*
  747. * Now program the IMAP registers. Each IARR region may have one or
  748. * more IMAP windows.
  749. */
  750. size >>= ilog2(nr_windows);
  751. for (window_idx = 0; window_idx < nr_windows; window_idx++) {
  752. val = readl(pcie->base + imap_offset);
  753. val |= lower_32_bits(axi_addr) | IMAP_VALID;
  754. writel(val, pcie->base + imap_offset);
  755. writel(upper_32_bits(axi_addr),
  756. pcie->base + imap_offset + ib_map->imap_addr_offset);
  757. dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
  758. window_idx, readl(pcie->base + imap_offset),
  759. readl(pcie->base + imap_offset +
  760. ib_map->imap_addr_offset));
  761. imap_offset += ib_map->imap_window_offset;
  762. axi_addr += size;
  763. }
  764. return 0;
  765. }
/*
 * Find a free inbound region of matching @type whose supported size equals
 * the range's size exactly, then program it.  Returns 0 on success,
 * negative errno when no region fits or alignment is violated.
 */
static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
			       struct of_pci_range *range,
			       enum iproc_pcie_ib_map_type type)
{
	struct device *dev = pcie->dev;
	struct iproc_pcie_ib *ib = &pcie->ib;
	int ret;
	unsigned int region_idx, size_idx;
	u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
	resource_size_t size = range->size;

	/* iterate through all IARR mapping regions */
	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
		const struct iproc_pcie_ib_map *ib_map =
			&pcie->ib_map[region_idx];

		/*
		 * If current inbound region is already in use or not a
		 * compatible type, move on to the next.
		 */
		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
		    !iproc_pcie_ib_check_type(ib_map, type))
			continue;

		/* iterate through all supported region sizes to find a match */
		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
			resource_size_t region_size =
			ib_map->region_sizes[size_idx] * ib_map->size_unit;

			/* only an exact size match is accepted */
			if (size != region_size)
				continue;

			if (!IS_ALIGNED(axi_addr, region_size) ||
			    !IS_ALIGNED(pci_addr, region_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/* Match found! Program IARR and all IMAP windows. */
			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
						  ib_map->nr_windows, axi_addr,
						  pci_addr, size);
			if (ret)
				goto err_ib;
			else
				return 0;
		}
	}
	ret = -EINVAL;

err_ib:
	dev_err(dev, "unable to configure inbound mapping\n");
	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
		&axi_addr, &pci_addr, &size);

	return ret;
}
  817. static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
  818. struct device_node *node)
  819. {
  820. const int na = 3, ns = 2;
  821. int rlen;
  822. parser->node = node;
  823. parser->pna = of_n_addr_cells(node);
  824. parser->np = parser->pna + na + ns;
  825. parser->range = of_get_property(node, "dma-ranges", &rlen);
  826. if (!parser->range)
  827. return -ENOENT;
  828. parser->end = parser->range + rlen / sizeof(__be32);
  829. return 0;
  830. }
  831. static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
  832. {
  833. struct of_pci_range range;
  834. struct of_pci_range_parser parser;
  835. int ret;
  836. /* Get the dma-ranges from DT */
  837. ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
  838. if (ret)
  839. return ret;
  840. for_each_of_pci_range(&parser, &range) {
  841. /* Each range entry corresponds to an inbound mapping region */
  842. ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
  843. if (ret)
  844. return ret;
  845. }
  846. return 0;
  847. }
  848. static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
  849. struct device_node *msi_node,
  850. u64 *msi_addr)
  851. {
  852. struct device *dev = pcie->dev;
  853. int ret;
  854. struct resource res;
  855. /*
  856. * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
  857. * supported external MSI controller that requires steering.
  858. */
  859. if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
  860. dev_err(dev, "unable to find compatible MSI controller\n");
  861. return -ENODEV;
  862. }
  863. /* derive GITS_TRANSLATER address from GICv3 */
  864. ret = of_address_to_resource(msi_node, 0, &res);
  865. if (ret < 0) {
  866. dev_err(dev, "unable to obtain MSI controller resources\n");
  867. return ret;
  868. }
  869. *msi_addr = res.start + GITS_TRANSLATER;
  870. return 0;
  871. }
  872. static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
  873. {
  874. int ret;
  875. struct of_pci_range range;
  876. memset(&range, 0, sizeof(range));
  877. range.size = SZ_32K;
  878. range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
  879. ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
  880. return ret;
  881. }
/*
 * Program the PAXC v2 MSI steering registers so that MSI writes are
 * forwarded to the GICv3 ITS doorbell at @msi_addr.
 *
 * NOTE(review): the register writes below follow a specific hardware
 * programming sequence (base/window, GIC mode, address, then enable) —
 * do not reorder.
 */
static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
	u32 val;

	/*
	 * Program bits [43:13] of address of GITS_TRANSLATER register into
	 * bits [30:0] of the MSI base address register. In fact, in all iProc
	 * based SoCs, all I/O register bases are well below the 32-bit
	 * boundary, so we can safely assume bits [43:32] are always zeros.
	 */
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
			     (u32)(msi_addr >> 13));

	/* use a default 8K window size */
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);

	/* steering MSI to GICv3 ITS */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
	val |= GIC_V3_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);

	/*
	 * Program bits [43:2] of address of GITS_TRANSLATER register into the
	 * iProc MSI address registers.
	 */
	msi_addr >>= 2;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
			     upper_32_bits(msi_addr));
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
			     lower_32_bits(msi_addr));

	/* enable MSI only after steering is fully configured */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
	val |= MSI_ENABLE_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
}
  913. static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
  914. struct device_node *msi_node)
  915. {
  916. struct device *dev = pcie->dev;
  917. int ret;
  918. u64 msi_addr;
  919. ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr);
  920. if (ret < 0) {
  921. dev_err(dev, "msi steering failed\n");
  922. return ret;
  923. }
  924. switch (pcie->type) {
  925. case IPROC_PCIE_PAXB_V2:
  926. ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
  927. if (ret)
  928. return ret;
  929. break;
  930. case IPROC_PCIE_PAXC_V2:
  931. iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
  932. break;
  933. default:
  934. return -EINVAL;
  935. }
  936. return 0;
  937. }
  938. static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
  939. {
  940. struct device_node *msi_node;
  941. int ret;
  942. /*
  943. * Either the "msi-parent" or the "msi-map" phandle needs to exist
  944. * for us to obtain the MSI node.
  945. */
  946. msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
  947. if (!msi_node) {
  948. const __be32 *msi_map = NULL;
  949. int len;
  950. u32 phandle;
  951. msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
  952. if (!msi_map)
  953. return -ENODEV;
  954. phandle = be32_to_cpup(msi_map + 1);
  955. msi_node = of_find_node_by_phandle(phandle);
  956. if (!msi_node)
  957. return -ENODEV;
  958. }
  959. /*
  960. * Certain revisions of the iProc PCIe controller require additional
  961. * configurations to steer the MSI writes towards an external MSI
  962. * controller.
  963. */
  964. if (pcie->need_msi_steer) {
  965. ret = iproc_pcie_msi_steer(pcie, msi_node);
  966. if (ret)
  967. return ret;
  968. }
  969. /*
  970. * If another MSI controller is being used, the call below should fail
  971. * but that is okay
  972. */
  973. return iproc_msi_init(pcie, msi_node);
  974. }
/* Tear down the iProc MSI support set up by iproc_pcie_msi_enable(). */
static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
{
	iproc_msi_exit(pcie);
}
  979. static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
  980. {
  981. struct device *dev = pcie->dev;
  982. unsigned int reg_idx;
  983. const u16 *regs;
  984. switch (pcie->type) {
  985. case IPROC_PCIE_PAXB_BCMA:
  986. regs = iproc_pcie_reg_paxb_bcma;
  987. break;
  988. case IPROC_PCIE_PAXB:
  989. regs = iproc_pcie_reg_paxb;
  990. pcie->has_apb_err_disable = true;
  991. if (pcie->need_ob_cfg) {
  992. pcie->ob_map = paxb_ob_map;
  993. pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
  994. }
  995. break;
  996. case IPROC_PCIE_PAXB_V2:
  997. regs = iproc_pcie_reg_paxb_v2;
  998. pcie->has_apb_err_disable = true;
  999. if (pcie->need_ob_cfg) {
  1000. pcie->ob_map = paxb_v2_ob_map;
  1001. pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
  1002. }
  1003. pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
  1004. pcie->ib_map = paxb_v2_ib_map;
  1005. pcie->need_msi_steer = true;
  1006. break;
  1007. case IPROC_PCIE_PAXC:
  1008. regs = iproc_pcie_reg_paxc;
  1009. pcie->ep_is_internal = true;
  1010. break;
  1011. case IPROC_PCIE_PAXC_V2:
  1012. regs = iproc_pcie_reg_paxc_v2;
  1013. pcie->ep_is_internal = true;
  1014. pcie->need_msi_steer = true;
  1015. break;
  1016. default:
  1017. dev_err(dev, "incompatible iProc PCIe interface\n");
  1018. return -EINVAL;
  1019. }
  1020. pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
  1021. sizeof(*pcie->reg_offsets),
  1022. GFP_KERNEL);
  1023. if (!pcie->reg_offsets)
  1024. return -ENOMEM;
  1025. /* go through the register table and populate all valid registers */
  1026. pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
  1027. IPROC_PCIE_REG_INVALID : regs[0];
  1028. for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
  1029. pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
  1030. regs[reg_idx] : IPROC_PCIE_REG_INVALID;
  1031. return 0;
  1032. }
/**
 * iproc_pcie_setup - bring up an iProc PCIe controller and scan its bus
 * @pcie: iProc PCIe controller to initialize
 * @res: list of host bridge resources
 *
 * Initializes revision-specific parameters, powers up the PHY, resets the
 * controller, programs outbound/inbound address mappings, creates and scans
 * the PCI root bus, and enables MSI support when configured.
 *
 * Return: 0 on success, negative errno on failure. On failure, partially
 * acquired resources (root bus, PHY power, PHY init) are released in
 * reverse order of acquisition.
 */
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
	struct device *dev;
	int ret;
	void *sysdata;
	struct pci_bus *bus;

	dev = pcie->dev;

	/* select register layout and capabilities for this revision */
	ret = iproc_pcie_rev_init(pcie);
	if (ret) {
		dev_err(dev, "unable to initialize controller parameters\n");
		return ret;
	}

	ret = devm_request_pci_bus_resources(dev, res);
	if (ret)
		return ret;

	/* bring up the PCIe PHY before touching the controller itself */
	ret = phy_init(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to initialize PCIe PHY\n");
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to power on PCIe PHY\n");
		goto err_exit_phy;
	}

	iproc_pcie_reset(pcie);

	/* outbound (CPU -> PCI) mappings, only when this revision needs them */
	if (pcie->need_ob_cfg) {
		ret = iproc_pcie_map_ranges(pcie, res);
		if (ret) {
			dev_err(dev, "map failed\n");
			goto err_power_off_phy;
		}
	}

	/*
	 * Inbound (PCI -> CPU) mappings come from the DT "dma-ranges"
	 * property; a missing property (-ENOENT) is not an error.
	 */
	ret = iproc_pcie_map_dma_ranges(pcie);
	if (ret && ret != -ENOENT)
		goto err_power_off_phy;

#ifdef CONFIG_ARM
	/* 32-bit ARM carries the private data inside pcie->sysdata */
	pcie->sysdata.private_data = pcie;
	sysdata = &pcie->sysdata;
#else
	sysdata = pcie;
#endif

	bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
	if (!bus) {
		dev_err(dev, "unable to create PCI root bus\n");
		ret = -ENOMEM;
		goto err_power_off_phy;
	}
	pcie->root_bus = bus;

	ret = iproc_pcie_check_link(pcie, bus);
	if (ret) {
		dev_err(dev, "no PCIe EP device detected\n");
		goto err_rm_root_bus;
	}

	iproc_pcie_enable(pcie);

	/* MSI setup is best-effort; fall back to legacy interrupts on failure */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		if (iproc_pcie_msi_enable(pcie))
			dev_info(dev, "not using iProc MSI\n");

	pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);

	if (pcie->map_irq)
		pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);

	pci_bus_add_devices(bus);

	return 0;

err_rm_root_bus:
	pci_stop_root_bus(bus);
	pci_remove_root_bus(bus);

err_power_off_phy:
	phy_power_off(pcie->phy);
err_exit_phy:
	phy_exit(pcie->phy);
	return ret;
}
EXPORT_SYMBOL(iproc_pcie_setup);
/**
 * iproc_pcie_remove - tear down an iProc PCIe controller
 * @pcie: controller previously brought up by iproc_pcie_setup()
 *
 * Removes the root bus, disables MSI, and powers down and exits the PHY,
 * in reverse order of initialization.
 *
 * Return: always 0.
 */
int iproc_pcie_remove(struct iproc_pcie *pcie)
{
	pci_stop_root_bus(pcie->root_bus);
	pci_remove_root_bus(pcie->root_bus);

	iproc_pcie_msi_disable(pcie);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);

	return 0;
}
EXPORT_SYMBOL(iproc_pcie_remove);
  1117. MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
  1118. MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
  1119. MODULE_LICENSE("GPL v2");