pcie-iproc.c

/*
 * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
 * Copyright (C) 2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>

#include "pcie-iproc.h"

#define EP_PERST_SOURCE_SELECT_SHIFT	2
#define EP_PERST_SOURCE_SELECT		BIT(EP_PERST_SOURCE_SELECT_SHIFT)
#define EP_MODE_SURVIVE_PERST_SHIFT	1
#define EP_MODE_SURVIVE_PERST		BIT(EP_MODE_SURVIVE_PERST_SHIFT)
#define RC_PCIE_RST_OUTPUT_SHIFT	0
#define RC_PCIE_RST_OUTPUT		BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK			0x7f

#define GIC_V3_CFG_SHIFT		0
#define GIC_V3_CFG			BIT(GIC_V3_CFG_SHIFT)

#define MSI_ENABLE_CFG_SHIFT		0
#define MSI_ENABLE_CFG			BIT(MSI_ENABLE_CFG_SHIFT)

#define CFG_IND_ADDR_MASK		0x00001ffc

#define CFG_ADDR_BUS_NUM_SHIFT		20
#define CFG_ADDR_BUS_NUM_MASK		0x0ff00000
#define CFG_ADDR_DEV_NUM_SHIFT		15
#define CFG_ADDR_DEV_NUM_MASK		0x000f8000
#define CFG_ADDR_FUNC_NUM_SHIFT		12
#define CFG_ADDR_FUNC_NUM_MASK		0x00007000
#define CFG_ADDR_REG_NUM_SHIFT		2
#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
#define CFG_ADDR_CFG_TYPE_SHIFT		0
#define CFG_ADDR_CFG_TYPE_MASK		0x00000003

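/*
 * For illustration only (hypothetical values): a type-1 config access to
 * bus 1, device 0, function 0, register 0x10 would be encoded as
 *	(1 << CFG_ADDR_BUS_NUM_SHIFT) | (0x10 & CFG_ADDR_REG_NUM_MASK) |
 *	(1 & CFG_ADDR_CFG_TYPE_MASK) == 0x00100011,
 * which is the kind of value iproc_pcie_map_ep_cfg_reg() below writes to
 * IPROC_PCIE_CFG_ADDR.
 */
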
#define SYS_RC_INTX_MASK		0xf

#define PCIE_PHYLINKUP_SHIFT		3
#define PCIE_PHYLINKUP			BIT(PCIE_PHYLINKUP_SHIFT)
#define PCIE_DL_ACTIVE_SHIFT		2
#define PCIE_DL_ACTIVE			BIT(PCIE_DL_ACTIVE_SHIFT)

#define APB_ERR_EN_SHIFT		0
#define APB_ERR_EN			BIT(APB_ERR_EN_SHIFT)

#define CFG_RETRY_STATUS		0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US	500000 /* 500 milliseconds */

/* derive the enum index of the outbound/inbound mapping registers */
#define MAP_REG(base_reg, index)	((base_reg) + (index) * 2)

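/*
 * For example, MAP_REG(IPROC_PCIE_OARR0, 1) yields IPROC_PCIE_OARR1 and
 * MAP_REG(IPROC_PCIE_IARR0, 2) yields IPROC_PCIE_IARR2, because the
 * OARRn/OMAPn and IARRn/IMAPn values in enum iproc_pcie_reg below are laid
 * out in consecutive pairs.
 */
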
/*
 * Maximum number of outbound mapping window sizes that can be supported by any
 * OARR/OMAP mapping pair
 */
#define MAX_NUM_OB_WINDOW_SIZES		4

#define OARR_VALID_SHIFT		0
#define OARR_VALID			BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT		1

/*
 * Maximum number of inbound mapping region sizes that can be supported by an
 * IARR
 */
#define MAX_NUM_IB_REGION_SIZES		9

#define IMAP_VALID_SHIFT		0
#define IMAP_VALID			BIT(IMAP_VALID_SHIFT)

#define IPROC_PCI_EXP_CAP		0xac

#define IPROC_PCIE_REG_INVALID		0xffff

/**
 * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific
 * parameters
 * @window_sizes: list of supported outbound mapping window sizes in MB
 * @nr_sizes: number of supported outbound mapping window sizes
 */
struct iproc_pcie_ob_map {
	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
	unsigned int nr_sizes;
};

static const struct iproc_pcie_ob_map paxb_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
};

static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR2/OMAP2 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
	{
		/* OARR3/OMAP3 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
};

/**
 * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
 */
enum iproc_pcie_ib_map_type {
	/* for DDR memory */
	IPROC_PCIE_IB_MAP_MEM = 0,

	/* for device I/O memory */
	IPROC_PCIE_IB_MAP_IO,

	/* invalid or unused */
	IPROC_PCIE_IB_MAP_INVALID
};

/**
 * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
 * parameters
 * @type: inbound mapping region type
 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 * SZ_1G
 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 * GB, depending on the size unit
 * @nr_sizes: number of supported inbound mapping region sizes
 * @nr_windows: number of supported inbound mapping windows for the region
 * @imap_addr_offset: register offset between the upper and lower 32-bit
 * IMAP address registers
 * @imap_window_offset: register offset between each IMAP window
 */
struct iproc_pcie_ib_map {
	enum iproc_pcie_ib_map_type type;
	unsigned int size_unit;
	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
	unsigned int nr_sizes;
	unsigned int nr_windows;
	u16 imap_addr_offset;
	u16 imap_window_offset;
};

static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
	{
		/* IARR0/IMAP0 */
		.type = IPROC_PCIE_IB_MAP_IO,
		.size_unit = SZ_1K,
		.region_sizes = { 32 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x40,
		.imap_window_offset = 0x4,
	},
	{
		/* IARR1/IMAP1 (currently unused) */
		.type = IPROC_PCIE_IB_MAP_INVALID,
	},
	{
		/* IARR2/IMAP2 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
				  16384 },
		.nr_sizes = 9,
		.nr_windows = 1,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR3/IMAP3 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 1, 2, 4, 8, 16, 32 },
		.nr_sizes = 6,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR4/IMAP4 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 32, 64, 128, 256, 512 },
		.nr_sizes = 5,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
};

/*
 * iProc PCIe host registers
 */
enum iproc_pcie_reg {
	/* clock/reset signal control */
	IPROC_PCIE_CLK_CTRL = 0,

	/*
	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
	 * GICv3 ITS)
	 */
	IPROC_PCIE_MSI_GIC_MODE,

	/*
	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
	 * window where the MSI posted writes are written, for the writes to be
	 * interpreted as MSI writes.
	 */
	IPROC_PCIE_MSI_BASE_ADDR,
	IPROC_PCIE_MSI_WINDOW_SIZE,

	/*
	 * To hold the address of the register where the MSI writes are
	 * programmed. When ARM GICv3 ITS is used, this should be programmed
	 * with the address of the GITS_TRANSLATER register.
	 */
	IPROC_PCIE_MSI_ADDR_LO,
	IPROC_PCIE_MSI_ADDR_HI,

	/* enable MSI */
	IPROC_PCIE_MSI_EN_CFG,

	/* allow access to root complex configuration space */
	IPROC_PCIE_CFG_IND_ADDR,
	IPROC_PCIE_CFG_IND_DATA,

	/* allow access to device configuration space */
	IPROC_PCIE_CFG_ADDR,
	IPROC_PCIE_CFG_DATA,

	/* enable INTx */
	IPROC_PCIE_INTX_EN,

	/* outbound address mapping */
	IPROC_PCIE_OARR0,
	IPROC_PCIE_OMAP0,
	IPROC_PCIE_OARR1,
	IPROC_PCIE_OMAP1,
	IPROC_PCIE_OARR2,
	IPROC_PCIE_OMAP2,
	IPROC_PCIE_OARR3,
	IPROC_PCIE_OMAP3,

	/* inbound address mapping */
	IPROC_PCIE_IARR0,
	IPROC_PCIE_IMAP0,
	IPROC_PCIE_IARR1,
	IPROC_PCIE_IMAP1,
	IPROC_PCIE_IARR2,
	IPROC_PCIE_IMAP2,
	IPROC_PCIE_IARR3,
	IPROC_PCIE_IMAP3,
	IPROC_PCIE_IARR4,
	IPROC_PCIE_IMAP4,

	/* link status */
	IPROC_PCIE_LINK_STATUS,

	/* enable APB error for unsupported requests */
	IPROC_PCIE_APB_ERR_EN,

	/* total number of core registers */
	IPROC_PCIE_MAX_NUM_REG,
};

/* iProc PCIe PAXB BCMA registers */
static const u16 iproc_pcie_reg_paxb_bcma[] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
};

/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXB v2 registers */
static const u16 iproc_pcie_reg_paxb_v2[] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_OARR2]		= 0xd60,
	[IPROC_PCIE_OMAP2]		= 0xd68,
	[IPROC_PCIE_OARR3]		= 0xdf0,
	[IPROC_PCIE_OMAP3]		= 0xdf8,
	[IPROC_PCIE_IARR0]		= 0xd00,
	[IPROC_PCIE_IMAP0]		= 0xc00,
	[IPROC_PCIE_IARR2]		= 0xd10,
	[IPROC_PCIE_IMAP2]		= 0xcc0,
	[IPROC_PCIE_IARR3]		= 0xe00,
	[IPROC_PCIE_IMAP3]		= 0xe08,
	[IPROC_PCIE_IARR4]		= 0xe68,
	[IPROC_PCIE_IMAP4]		= 0xe70,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};

/* iProc PCIe PAXC v2 registers */
static const u16 iproc_pcie_reg_paxc_v2[] = {
	[IPROC_PCIE_MSI_GIC_MODE]	= 0x050,
	[IPROC_PCIE_MSI_BASE_ADDR]	= 0x074,
	[IPROC_PCIE_MSI_WINDOW_SIZE]	= 0x078,
	[IPROC_PCIE_MSI_ADDR_LO]	= 0x07c,
	[IPROC_PCIE_MSI_ADDR_HI]	= 0x080,
	[IPROC_PCIE_MSI_EN_CFG]		= 0x09c,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};

static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
	struct iproc_pcie *pcie;
#ifdef CONFIG_ARM
	struct pci_sys_data *sys = bus->sysdata;

	pcie = sys->private_data;
#else
	pcie = bus->sysdata;
#endif
	return pcie;
}

static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
{
	return !!(reg_offset == IPROC_PCIE_REG_INVALID);
}

static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg)
{
	return pcie->reg_offsets[reg];
}

static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
				      enum iproc_pcie_reg reg)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return 0;

	return readl(pcie->base + offset);
}

static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg, u32 val)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return;

	writel(val, pcie->base + offset);
}

/*
 * APB error forwarding can be disabled while accessing the configuration
 * registers of the endpoint device, to prevent unsupported requests
 * (typically seen during enumeration with multi-function devices) from
 * triggering a system exception.
 */
static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
					      bool disable)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	u32 val;

	if (bus->number && pcie->has_apb_err_disable) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
		if (disable)
			val &= ~APB_ERR_EN;
		else
			val |= APB_ERR_EN;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
	}
}

static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
					       unsigned int busno,
					       unsigned int slot,
					       unsigned int fn,
					       int where)
{
	u16 offset;
	u32 val;

	/* EP device access */
	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
		(fn << CFG_ADDR_FUNC_NUM_SHIFT) |
		(where & CFG_ADDR_REG_NUM_MASK) |
		(1 & CFG_ADDR_CFG_TYPE_MASK);

	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
	if (iproc_pcie_reg_is_invalid(offset))
		return NULL;

	return (pcie->base + offset);
}

static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
{
	int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
	unsigned int data;

	/*
	 * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
	 * affects config reads of the Vendor ID. For config writes or any
	 * other config reads, the Root may automatically reissue the
	 * configuration request again as a new request.
	 *
	 * For config reads, this hardware returns CFG_RETRY_STATUS data
	 * when it receives a CRS completion, regardless of the address of
	 * the read or the CRS Software Visibility Enable bit. As a
	 * partial workaround for this, we retry in software any read that
	 * returns CFG_RETRY_STATUS.
	 *
	 * Note that a non-Vendor ID config register may have a value of
	 * CFG_RETRY_STATUS. If we read that, we can't distinguish it from
	 * a CRS completion, so we will incorrectly retry the read and
	 * eventually return the wrong data (0xffffffff).
	 */
	data = readl(cfg_data_p);
	while (data == CFG_RETRY_STATUS && timeout--) {
		udelay(1);
		data = readl(cfg_data_p);
	}

	if (data == CFG_RETRY_STATUS)
		data = 0xffffffff;

	return data;
}

static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned int slot = PCI_SLOT(devfn);
	unsigned int fn = PCI_FUNC(devfn);
	unsigned int busno = bus->number;
	void __iomem *cfg_data_p;
	unsigned int data;
	int ret;

	/* root complex access */
	if (busno == 0) {
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
		if (ret != PCIBIOS_SUCCESSFUL)
			return ret;

		/* Don't advertise CRS SV support */
		if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL)
			*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
		return PCIBIOS_SUCCESSFUL;
	}

	cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);

	if (!cfg_data_p)
		return PCIBIOS_DEVICE_NOT_FOUND;

	data = iproc_pcie_cfg_retry(cfg_data_p);

	*val = data;
	if (size <= 2)
		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}

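/*
 * Worked example (illustrative values only): a 2-byte config read at
 * where = 0x2, when the full aligned dword reads back as 0x12345678, is
 * extracted above as (0x12345678 >> 16) & 0xffff == 0x1234, i.e. the upper
 * half-word of the dword, matching the size/offset math in
 * iproc_pcie_config_read().
 */
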
/*
 * Note that accesses to the configuration registers are protected at a higher
 * layer by 'pci_lock' in drivers/pci/access.c
 */
static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
					    int busno, unsigned int devfn,
					    int where)
{
	unsigned int slot = PCI_SLOT(devfn);
	unsigned int fn = PCI_FUNC(devfn);
	u16 offset;

	/* root complex access */
	if (busno == 0) {
		if (slot > 0 || fn > 0)
			return NULL;

		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
				     where & CFG_IND_ADDR_MASK);
		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
		if (iproc_pcie_reg_is_invalid(offset))
			return NULL;
		else
			return (pcie->base + offset);
	}

	/*
	 * PAXC is connected to an internally emulated EP within the SoC. It
	 * allows only one device.
	 */
	if (pcie->ep_is_internal)
		if (slot > 0)
			return NULL;

	return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
}

static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
						unsigned int devfn,
						int where)
{
	return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
				      where);
}

static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
				       unsigned int devfn, int where,
				       int size, u32 *val)
{
	void __iomem *addr;

	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}

static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
					unsigned int devfn, int where,
					int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

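/*
 * Worked example (illustrative values only): a 1-byte write at where = 0x41
 * computes mask = ~(0xff << 8) == 0xffff00ff in the function above, so only
 * bits [15:8] of the aligned dword at offset 0x40 are replaced by the new
 * byte; the other three bytes are preserved by the read-modify-write
 * sequence.
 */
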
static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
{
	int ret;
	struct iproc_pcie *pcie = iproc_data(bus);

	iproc_pcie_apb_err_disable(bus, true);
	if (pcie->type == IPROC_PCIE_PAXB_V2)
		ret = iproc_pcie_config_read(bus, devfn, where, size, val);
	else
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
	iproc_pcie_apb_err_disable(bus, false);

	return ret;
}

static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
				     int where, int size, u32 val)
{
	int ret;

	iproc_pcie_apb_err_disable(bus, true);
	ret = pci_generic_config_write32(bus, devfn, where, size, val);
	iproc_pcie_apb_err_disable(bus, false);

	return ret;
}

static struct pci_ops iproc_pcie_ops = {
	.map_bus = iproc_pcie_bus_map_cfg_bus,
	.read = iproc_pcie_config_read32,
	.write = iproc_pcie_config_write32,
};

static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
{
	u32 val;

	/*
	 * PAXC and the internal emulated endpoint device downstream should not
	 * be reset. If firmware has been loaded on the endpoint device at an
	 * earlier boot stage, reset here causes issues.
	 */
	if (pcie->ep_is_internal)
		return;

	if (assert) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
		val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
			~RC_PCIE_RST_OUTPUT;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
		udelay(250);
	} else {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
		val |= RC_PCIE_RST_OUTPUT;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
		msleep(100);
	}
}

int iproc_pcie_shutdown(struct iproc_pcie *pcie)
{
	iproc_pcie_perst_ctrl(pcie, true);
	msleep(500);

	return 0;
}
EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);

static int iproc_pcie_check_link(struct iproc_pcie *pcie)
{
	struct device *dev = pcie->dev;
	u32 hdr_type, link_ctrl, link_status, class, val;
	bool link_is_active = false;

	/*
	 * PAXC connects to emulated endpoint devices directly and does not
	 * have a Serdes. Therefore skip the link detection logic here.
	 */
	if (pcie->ep_is_internal)
		return 0;

	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
		dev_err(dev, "PHY or data link is INACTIVE!\n");
		return -ENODEV;
	}

	/* make sure we are not in EP mode */
	iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
		return -EFAULT;
	}

	/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
#define PCI_CLASS_BRIDGE_MASK		0xffff00
#define PCI_CLASS_BRIDGE_SHIFT		8
	iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
				    4, &class);
	class &= ~PCI_CLASS_BRIDGE_MASK;
	class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
	iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
				     4, class);

	/* check link status to see if link is active */
	iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
				    2, &link_status);
	if (link_status & PCI_EXP_LNKSTA_NLW)
		link_is_active = true;

	if (!link_is_active) {
		/* try GEN 1 link speed */
#define PCI_TARGET_LINK_SPEED_MASK	0xf
#define PCI_TARGET_LINK_SPEED_GEN2	0x2
#define PCI_TARGET_LINK_SPEED_GEN1	0x1
		iproc_pci_raw_config_read32(pcie, 0,
					    IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
					    4, &link_ctrl);
		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
		    PCI_TARGET_LINK_SPEED_GEN2) {
			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
			iproc_pci_raw_config_write32(pcie, 0,
					IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
					4, link_ctrl);
			msleep(100);

			iproc_pci_raw_config_read32(pcie, 0,
					IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
					2, &link_status);
			if (link_status & PCI_EXP_LNKSTA_NLW)
				link_is_active = true;
		}
	}

	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");

	return link_is_active ? 0 : -ENODEV;
}

static void iproc_pcie_enable(struct iproc_pcie *pcie)
{
	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}

static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
					  int window_idx)
{
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));

	return !!(val & OARR_VALID);
}

static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
				      int size_idx, u64 axi_addr, u64 pci_addr)
{
	struct device *dev = pcie->dev;
	u16 oarr_offset, omap_offset;

	/*
	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
	 * on window index.
	 */
	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
							  window_idx));
	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
							  window_idx));
	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
	    iproc_pcie_reg_is_invalid(omap_offset))
		return -EINVAL;

	/*
	 * Program the OARR registers. The upper 32-bit OARR register is
	 * always right after the lower 32-bit OARR register.
	 */
	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
	       OARR_VALID, pcie->base + oarr_offset);
	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);

	/* now program the OMAP registers */
	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);

	dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
		 window_idx, oarr_offset, &axi_addr, &pci_addr);
	dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
		 readl(pcie->base + oarr_offset),
		 readl(pcie->base + oarr_offset + 4));
	dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
		 readl(pcie->base + omap_offset),
		 readl(pcie->base + omap_offset + 4));

	return 0;
}

/*
 * Some iProc SoCs require the SW to configure the outbound address mapping.
 *
 * Outbound address translation:
 *
 *	iproc_pcie_address = axi_address - axi_offset
 *	OARR = iproc_pcie_address
 *	OMAP = pci_addr
 *
 *	axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 */
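/*
 * Purely illustrative example (hypothetical addresses): with axi_offset = 0,
 * mapping a 128 MB CPU window at 0x20000000 to PCI address 0x0 through
 * OARR0/OMAP0 would program OARR0 lo = 0x20000000 | (size_idx << 1) |
 * OARR_VALID and OMAP0 lo/hi = 0x0; iproc_pcie_setup_ob() below derives
 * such mappings from the host bridge memory resources.
 */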
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	struct device *dev = pcie->dev;
	int ret = -EINVAL, window_idx, size_idx;

	if (axi_addr < ob->axi_offset) {
		dev_err(dev, "axi address %pap less than offset %pap\n",
			&axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	/* iterate through all OARR/OMAP mapping windows */
	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
		const struct iproc_pcie_ob_map *ob_map =
			&pcie->ob_map[window_idx];

		/*
		 * If current outbound window is already in use, move on to the
		 * next one.
		 */
		if (iproc_pcie_ob_is_valid(pcie, window_idx))
			continue;

		/*
		 * Iterate through all supported window sizes within the
		 * OARR/OMAP pair to find a match. Go through the window sizes
		 * in a descending order.
		 */
		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
		     size_idx--) {
			resource_size_t window_size =
				ob_map->window_sizes[size_idx] * SZ_1M;

			if (size < window_size)
				continue;

			if (!IS_ALIGNED(axi_addr, window_size) ||
			    !IS_ALIGNED(pci_addr, window_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/*
			 * Match found! Program both OARR and OMAP and mark
			 * them as a valid entry.
			 */
			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
						  axi_addr, pci_addr);
			if (ret)
				goto err_ob;

			size -= window_size;
			if (size == 0)
				return 0;

			/*
			 * If we are here, we are done with the current window,
			 * but not yet finished all mappings. Need to move on
			 * to the next window.
			 */
			axi_addr += window_size;
			pci_addr += window_size;
			break;
		}
	}

err_ob:
	dev_err(dev, "unable to configure outbound mapping\n");
	dev_err(dev,
		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
		&axi_addr, &ob->axi_offset, &pci_addr, &size);

	return ret;
}

static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
				 struct list_head *resources)
{
	struct device *dev = pcie->dev;
	struct resource_entry *window;
	int ret;

	resource_list_for_each_entry(window, resources) {
		struct resource *res = window->res;
		u64 res_type = resource_type(res);

		switch (res_type) {
		case IORESOURCE_IO:
		case IORESOURCE_BUS:
			break;
		case IORESOURCE_MEM:
			ret = iproc_pcie_setup_ob(pcie, res->start,
						  res->start - window->offset,
						  resource_size(res));
			if (ret)
				return ret;
			break;
		default:
			dev_err(dev, "invalid resource %pR\n", res);
			return -EINVAL;
		}
	}

	return 0;
}

static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
					   int region_idx)
{
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));

	return !!(val & (BIT(ib_map->nr_sizes) - 1));
}

static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
					    enum iproc_pcie_ib_map_type type)
{
	return !!(ib_map->type == type);
}

static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
			       int size_idx, int nr_windows, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct device *dev = pcie->dev;
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u16 iarr_offset, imap_offset;
	u32 val;
	int window_idx;

	iarr_offset = iproc_pcie_reg_offset(pcie,
					    MAP_REG(IPROC_PCIE_IARR0, region_idx));
	imap_offset = iproc_pcie_reg_offset(pcie,
					    MAP_REG(IPROC_PCIE_IMAP0, region_idx));
	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
	    iproc_pcie_reg_is_invalid(imap_offset))
		return -EINVAL;

	dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
		 region_idx, iarr_offset, &axi_addr, &pci_addr);

	/*
	 * Program the IARR registers. The upper 32-bit IARR register is
	 * always right after the lower 32-bit IARR register.
	 */
	writel(lower_32_bits(pci_addr) | BIT(size_idx),
	       pcie->base + iarr_offset);
	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);

	dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
		 readl(pcie->base + iarr_offset),
		 readl(pcie->base + iarr_offset + 4));

	/*
	 * Now program the IMAP registers. Each IARR region may have one or
	 * more IMAP windows.
	 */
	size >>= ilog2(nr_windows);
	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
		val = readl(pcie->base + imap_offset);
		val |= lower_32_bits(axi_addr) | IMAP_VALID;
		writel(val, pcie->base + imap_offset);
		writel(upper_32_bits(axi_addr),
		       pcie->base + imap_offset + ib_map->imap_addr_offset);

		dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
			 window_idx, readl(pcie->base + imap_offset),
			 readl(pcie->base + imap_offset +
			       ib_map->imap_addr_offset));

		imap_offset += ib_map->imap_window_offset;
		axi_addr += size;
	}

	return 0;
}

static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
			       struct of_pci_range *range,
			       enum iproc_pcie_ib_map_type type)
{
	struct device *dev = pcie->dev;
	struct iproc_pcie_ib *ib = &pcie->ib;
	int ret;
	unsigned int region_idx, size_idx;
	u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
	resource_size_t size = range->size;

	/* iterate through all IARR mapping regions */
	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
		const struct iproc_pcie_ib_map *ib_map =
			&pcie->ib_map[region_idx];

		/*
		 * If current inbound region is already in use or not a
		 * compatible type, move on to the next.
		 */
		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
		    !iproc_pcie_ib_check_type(ib_map, type))
			continue;

		/* iterate through all supported region sizes to find a match */
		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
			resource_size_t region_size =
			ib_map->region_sizes[size_idx] * ib_map->size_unit;

			if (size != region_size)
				continue;

			if (!IS_ALIGNED(axi_addr, region_size) ||
			    !IS_ALIGNED(pci_addr, region_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/* Match found! Program IARR and all IMAP windows. */
			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
						  ib_map->nr_windows, axi_addr,
						  pci_addr, size);
			if (ret)
				goto err_ib;
			else
				return 0;
		}
	}
	ret = -EINVAL;

err_ib:
	dev_err(dev, "unable to configure inbound mapping\n");
	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
		&axi_addr, &pci_addr, &size);

	return ret;
}

static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				     struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "dma-ranges", &rlen);
	if (!parser->range)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

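/*
 * Illustrative only (not taken from any particular board file, and assuming
 * two parent address cells): with the parser above, a device tree entry such
 * as
 *
 *	dma-ranges = <0x43000000 0x0 0x80000000 0x0 0x80000000 0x0 0x40000000>;
 *
 * would be seen as one of_pci_range with pci_addr = cpu_addr = 0x80000000
 * and size = 1 GB (3 PCI address cells, 2 parent address cells, 2 size
 * cells), which iproc_pcie_map_dma_ranges() below hands to
 * iproc_pcie_setup_ib() as an IPROC_PCIE_IB_MAP_MEM region.
 */
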
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int ret;

	/* Get the dma-ranges from DT */
	ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
	if (ret)
		return ret;

	for_each_of_pci_range(&parser, &range) {
		/* Each range entry corresponds to an inbound mapping region */
		ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
		if (ret)
			return ret;
	}

	return 0;
}

static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
			      struct device_node *msi_node,
			      u64 *msi_addr)
{
	struct device *dev = pcie->dev;
	int ret;
	struct resource res;

	/*
	 * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
	 * supported external MSI controller that requires steering.
	 */
	if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
		dev_err(dev, "unable to find compatible MSI controller\n");
		return -ENODEV;
	}

	/* derive GITS_TRANSLATER address from GICv3 */
	ret = of_address_to_resource(msi_node, 0, &res);
	if (ret < 0) {
		dev_err(dev, "unable to obtain MSI controller resources\n");
		return ret;
	}

	*msi_addr = res.start + GITS_TRANSLATER;
	return 0;
}

static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
	int ret;
	struct of_pci_range range;

	memset(&range, 0, sizeof(range));
	range.size = SZ_32K;
	range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);

	ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
	return ret;
}

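/*
 * Illustrative only (hypothetical address): if GITS_TRANSLATER sits at
 * 0x63c39000, the window set up above becomes pci_addr = cpu_addr =
 * 0x63c38000 with size = 32 KB, i.e. the MSI doorbell address rounded down
 * to a 32 KB boundary so it fits the 32 KB IARR0/IMAP0 I/O region.
 */
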
static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
	u32 val;

	/*
	 * Program bits [43:13] of address of GITS_TRANSLATER register into
	 * bits [30:0] of the MSI base address register. In fact, in all
	 * iProc-based SoCs, all I/O register bases are well below the 32-bit
	 * boundary, so we can safely assume bits [43:32] are always zeros.
	 */
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
			     (u32)(msi_addr >> 13));

	/* use a default 8K window size */
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);

	/* steering MSI to GICv3 ITS */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
	val |= GIC_V3_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);

	/*
	 * Program bits [43:2] of address of GITS_TRANSLATER register into the
	 * iProc MSI address registers.
	 */
	msi_addr >>= 2;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
			     upper_32_bits(msi_addr));
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
			     lower_32_bits(msi_addr));

	/* enable MSI */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
	val |= MSI_ENABLE_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
}

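/*
 * Worked example (hypothetical GITS_TRANSLATER address 0x04440000): the
 * function above writes MSI_BASE_ADDR = 0x04440000 >> 13 == 0x2220,
 * MSI_ADDR_LO = 0x04440000 >> 2 == 0x01110000 and MSI_ADDR_HI = 0, matching
 * the bit ranges described in its comments.
 */
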
static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
				struct device_node *msi_node)
{
	struct device *dev = pcie->dev;
	int ret;
	u64 msi_addr;

	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
	if (ret < 0) {
		dev_err(dev, "msi steering failed\n");
		return ret;
	}

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_V2:
		ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
		if (ret)
			return ret;
		break;
	case IPROC_PCIE_PAXC_V2:
		iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
	struct device_node *msi_node;
	int ret;

	/*
	 * Either the "msi-parent" or the "msi-map" phandle needs to exist
	 * for us to obtain the MSI node.
	 */
	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
	if (!msi_node) {
		const __be32 *msi_map = NULL;
		int len;
		u32 phandle;

		msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
		if (!msi_map)
			return -ENODEV;

		phandle = be32_to_cpup(msi_map + 1);
		msi_node = of_find_node_by_phandle(phandle);
		if (!msi_node)
			return -ENODEV;
	}

	/*
	 * Certain revisions of the iProc PCIe controller require additional
	 * configurations to steer the MSI writes towards an external MSI
	 * controller.
	 */
	if (pcie->need_msi_steer) {
		ret = iproc_pcie_msi_steer(pcie, msi_node);
		if (ret)
			return ret;
	}

	/*
	 * If another MSI controller is being used, the call below should fail
	 * but that is okay
	 */
	return iproc_msi_init(pcie, msi_node);
}

static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
{
	iproc_msi_exit(pcie);
}

static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int reg_idx;
	const u16 *regs;

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
		regs = iproc_pcie_reg_paxb_bcma;
		break;
	case IPROC_PCIE_PAXB:
		regs = iproc_pcie_reg_paxb;
		pcie->has_apb_err_disable = true;
		if (pcie->need_ob_cfg) {
			pcie->ob_map = paxb_ob_map;
			pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
		}
		break;
	case IPROC_PCIE_PAXB_V2:
		regs = iproc_pcie_reg_paxb_v2;
		pcie->has_apb_err_disable = true;
		if (pcie->need_ob_cfg) {
			pcie->ob_map = paxb_v2_ob_map;
			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
		}
		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
		pcie->ib_map = paxb_v2_ib_map;
		pcie->need_msi_steer = true;
		dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
			 CFG_RETRY_STATUS);
		break;
	case IPROC_PCIE_PAXC:
		regs = iproc_pcie_reg_paxc;
		pcie->ep_is_internal = true;
		break;
	case IPROC_PCIE_PAXC_V2:
		regs = iproc_pcie_reg_paxc_v2;
		pcie->ep_is_internal = true;
		pcie->need_msi_steer = true;
		break;
	default:
		dev_err(dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
					 sizeof(*pcie->reg_offsets),
					 GFP_KERNEL);
	if (!pcie->reg_offsets)
		return -ENOMEM;

	/* go through the register table and populate all valid registers */
	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
		IPROC_PCIE_REG_INVALID : regs[0];
	for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
			regs[reg_idx] : IPROC_PCIE_REG_INVALID;

	return 0;
}

int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
	struct device *dev;
	int ret;
	void *sysdata;
	struct pci_bus *child;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	dev = pcie->dev;

	ret = iproc_pcie_rev_init(pcie);
	if (ret) {
		dev_err(dev, "unable to initialize controller parameters\n");
		return ret;
	}

	ret = devm_request_pci_bus_resources(dev, res);
	if (ret)
		return ret;

	ret = phy_init(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to initialize PCIe PHY\n");
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to power on PCIe PHY\n");
		goto err_exit_phy;
	}

	iproc_pcie_perst_ctrl(pcie, true);
	iproc_pcie_perst_ctrl(pcie, false);

	if (pcie->need_ob_cfg) {
		ret = iproc_pcie_map_ranges(pcie, res);
		if (ret) {
			dev_err(dev, "map failed\n");
			goto err_power_off_phy;
		}
	}

	ret = iproc_pcie_map_dma_ranges(pcie);
	if (ret && ret != -ENOENT)
		goto err_power_off_phy;

#ifdef CONFIG_ARM
	pcie->sysdata.private_data = pcie;
	sysdata = &pcie->sysdata;
#else
	sysdata = pcie;
#endif

	ret = iproc_pcie_check_link(pcie);
	if (ret) {
		dev_err(dev, "no PCIe EP device detected\n");
		goto err_power_off_phy;
	}

	iproc_pcie_enable(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		if (iproc_pcie_msi_enable(pcie))
			dev_info(dev, "not using iProc MSI\n");

	list_splice_init(res, &host->windows);
	host->busnr = 0;
	host->dev.parent = dev;
	host->ops = &iproc_pcie_ops;
	host->sysdata = sysdata;
	host->map_irq = pcie->map_irq;
	host->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(host);
	if (ret < 0) {
		dev_err(dev, "failed to scan host: %d\n", ret);
		goto err_power_off_phy;
	}

	pci_assign_unassigned_bus_resources(host->bus);

	pcie->root_bus = host->bus;

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	return 0;

err_power_off_phy:
	phy_power_off(pcie->phy);
err_exit_phy:
	phy_exit(pcie->phy);
	return ret;
}
EXPORT_SYMBOL(iproc_pcie_setup);

int iproc_pcie_remove(struct iproc_pcie *pcie)
{
	pci_stop_root_bus(pcie->root_bus);
	pci_remove_root_bus(pcie->root_bus);

	iproc_pcie_msi_disable(pcie);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);

	return 0;
}
EXPORT_SYMBOL(iproc_pcie_remove);

MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");