pcie-rockchip.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Rockchip AXI PCIe host controller driver
  4. *
  5. * Copyright (c) 2016 Rockchip, Inc.
  6. *
  7. * Author: Shawn Lin <shawn.lin@rock-chips.com>
  8. * Wenrui Li <wenrui.li@rock-chips.com>
  9. *
  10. * Bits taken from Synopsys DesignWare Host controller driver and
  11. * ARM PCI Host generic driver.
  12. */
#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
/*
 * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
 * bits. This allows atomic updates of the register without locking.
 */
#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

/* Encode a lane count of 1/2/4 into the 2-bit CONF_LANE_NUM field */
#define ENCODE_LANES(x)			((((x) >> 1) & 3) << 4)
#define MAX_LANE_NUM			4

/* PCIe client registers: mode selection, link training, interrupts */
#define PCIE_CLIENT_BASE		0x0
#define PCIE_CLIENT_CONFIG		(PCIE_CLIENT_BASE + 0x00)
#define PCIE_CLIENT_CONF_ENABLE		HIWORD_UPDATE_BIT(0x0001)
#define PCIE_CLIENT_LINK_TRAIN_ENABLE	HIWORD_UPDATE_BIT(0x0002)
#define PCIE_CLIENT_ARI_ENABLE		HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x)	HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC		HIWORD_UPDATE_BIT(0x0040)
#define PCIE_CLIENT_GEN_SEL_1		HIWORD_UPDATE(0x0080, 0)
#define PCIE_CLIENT_GEN_SEL_2		HIWORD_UPDATE_BIT(0x0080)
#define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
#define PCIE_CLIENT_DEBUG_LTSSM_MASK	GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1	0x18
#define PCIE_CLIENT_DEBUG_LTSSM_L2	0x19
#define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP	0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK	0x00300000
#define PCIE_CLIENT_INT_MASK		(PCIE_CLIENT_BASE + 0x4c)
#define PCIE_CLIENT_INT_STATUS		(PCIE_CLIENT_BASE + 0x50)
#define PCIE_CLIENT_INTR_MASK		GENMASK(8, 5)
#define PCIE_CLIENT_INTR_SHIFT		5
#define PCIE_CLIENT_INT_LEGACY_DONE	BIT(15)
#define PCIE_CLIENT_INT_MSG		BIT(14)
#define PCIE_CLIENT_INT_HOT_RST		BIT(13)
#define PCIE_CLIENT_INT_DPA		BIT(12)
#define PCIE_CLIENT_INT_FATAL_ERR	BIT(11)
#define PCIE_CLIENT_INT_NFATAL_ERR	BIT(10)
#define PCIE_CLIENT_INT_CORR_ERR	BIT(9)
#define PCIE_CLIENT_INT_INTD		BIT(8)
#define PCIE_CLIENT_INT_INTC		BIT(7)
#define PCIE_CLIENT_INT_INTB		BIT(6)
#define PCIE_CLIENT_INT_INTA		BIT(5)
#define PCIE_CLIENT_INT_LOCAL		BIT(4)
#define PCIE_CLIENT_INT_UDMA		BIT(3)
#define PCIE_CLIENT_INT_PHY		BIT(2)
#define PCIE_CLIENT_INT_HOT_PLUG	BIT(1)
#define PCIE_CLIENT_INT_PWR_STCG	BIT(0)

/* All four legacy INTx interrupt bits */
#define PCIE_CLIENT_INT_LEGACY \
	(PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
	PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)

/* Client interrupts serviced by the subsystem IRQ handler */
#define PCIE_CLIENT_INT_CLI \
	(PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
	PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
	PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
	PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
	PCIE_CLIENT_INT_PHY)

/* Core control/management registers */
#define PCIE_CORE_CTRL_MGMT_BASE	0x900000
#define PCIE_CORE_CTRL			(PCIE_CORE_CTRL_MGMT_BASE + 0x000)
#define PCIE_CORE_PL_CONF_SPEED_5G	0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK	0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK	0x00000006
#define PCIE_CORE_PL_CONF_LANE_SHIFT	1
#define PCIE_CORE_CTRL_PLC1		(PCIE_CORE_CTRL_MGMT_BASE + 0x004)
#define PCIE_CORE_CTRL_PLC1_FTS_MASK	GENMASK(23, 8)
#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT	8
#define PCIE_CORE_CTRL_PLC1_FTS_CNT	0xffff
#define PCIE_CORE_TXCREDIT_CFG1		(PCIE_CORE_CTRL_MGMT_BASE + 0x020)
#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK	0xFFFF0000
#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT	16
/* MUI field is expressed in units of 8 (value >> 3) */
#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
	(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
#define PCIE_CORE_LANE_MAP		(PCIE_CORE_CTRL_MGMT_BASE + 0x200)
#define PCIE_CORE_LANE_MAP_MASK		0x0000000f
#define PCIE_CORE_LANE_MAP_REVERSE	BIT(16)
#define PCIE_CORE_INT_STATUS		(PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
#define PCIE_CORE_INT_PRFPE		BIT(0)
#define PCIE_CORE_INT_CRFPE		BIT(1)
#define PCIE_CORE_INT_RRPE		BIT(2)
#define PCIE_CORE_INT_PRFO		BIT(3)
#define PCIE_CORE_INT_CRFO		BIT(4)
#define PCIE_CORE_INT_RT		BIT(5)
#define PCIE_CORE_INT_RTR		BIT(6)
#define PCIE_CORE_INT_PE		BIT(7)
#define PCIE_CORE_INT_MTR		BIT(8)
#define PCIE_CORE_INT_UCR		BIT(9)
#define PCIE_CORE_INT_FCE		BIT(10)
#define PCIE_CORE_INT_CT		BIT(11)
#define PCIE_CORE_INT_UTC		BIT(18)
#define PCIE_CORE_INT_MMVC		BIT(19)
#define PCIE_CORE_CONFIG_VENDOR		(PCIE_CORE_CTRL_MGMT_BASE + 0x44)
#define PCIE_CORE_INT_MASK		(PCIE_CORE_CTRL_MGMT_BASE + 0x210)
#define PCIE_RC_BAR_CONF		(PCIE_CORE_CTRL_MGMT_BASE + 0x300)

/* Core interrupts reported through PCIE_CORE_INT_STATUS */
#define PCIE_CORE_INT \
	(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
	PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
	PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
	PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
	PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
	PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
	PCIE_CORE_INT_MMVC)

/* Root-complex own config space apertures and register offsets */
#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
#define PCIE_RC_CONFIG_BASE		0xa00000
#define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT	16
#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT	18
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT	0xff
#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT	26
#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
#define PCIE_RC_CONFIG_DCSR_MPS_MASK	GENMASK(7, 5)
#define PCIE_RC_CONFIG_DCSR_MPS_256	(0x1 << 5)
#define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
#define PCIE_RC_CONFIG_LINK_CAP_L0S	BIT(10)
#define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2	(PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)

/* AXI wrapper outbound/inbound address-translation registers */
#define PCIE_CORE_AXI_CONF_BASE		0xc00000
#define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	0xffffff00
#define PCIE_CORE_OB_REGION_ADDR1	(PCIE_CORE_AXI_CONF_BASE + 0x4)
#define PCIE_CORE_OB_REGION_DESC0	(PCIE_CORE_AXI_CONF_BASE + 0x8)
#define PCIE_CORE_OB_REGION_DESC1	(PCIE_CORE_AXI_CONF_BASE + 0xc)
#define PCIE_CORE_AXI_INBOUND_BASE	0xc00800
#define PCIE_RP_IB_ADDR0		(PCIE_CORE_AXI_INBOUND_BASE + 0x0)
#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS	0x3f
#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	0xffffff00
#define PCIE_RP_IB_ADDR1		(PCIE_CORE_AXI_INBOUND_BASE + 0x4)

/* Size of one AXI Region (not Region 0) */
#define AXI_REGION_SIZE			BIT(20)
/* Size of Region 0, equal to sum of sizes of other regions */
#define AXI_REGION_0_SIZE		(32 * (0x1 << 20))
#define OB_REG_SIZE_SHIFT		5
#define IB_ROOT_PORT_REG_SIZE_SHIFT	3

/* Transaction types programmed into the outbound region descriptor */
#define AXI_WRAPPER_IO_WRITE		0x6
#define AXI_WRAPPER_MEM_WRITE		0x2
#define AXI_WRAPPER_TYPE0_CFG		0xa
#define AXI_WRAPPER_TYPE1_CFG		0xb
#define AXI_WRAPPER_NOR_MSG		0xc

#define MAX_AXI_IB_ROOTPORT_REGION_NUM	3
#define MIN_AXI_ADDR_BITS_PASSED	8
#define PCIE_RC_SEND_PME_OFF		0x11960
#define ROCKCHIP_VENDOR_ID		0x1d87

/* Build an ECAM-style config-space offset from bus/device/function/register */
#define PCIE_ECAM_BUS(x)		(((x) & 0xff) << 20)
#define PCIE_ECAM_DEV(x)		(((x) & 0x1f) << 15)
#define PCIE_ECAM_FUNC(x)		(((x) & 0x7) << 12)
#define PCIE_ECAM_REG(x)		(((x) & 0xfff) << 0)
#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
	(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
	PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))

/* LTSSM / link-state predicates over the debug and status registers */
#define PCIE_LINK_IS_L2(x) \
	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
#define PCIE_LINK_UP(x) \
	(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
#define PCIE_LINK_IS_GEN2(x) \
	(((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)

/* Region 0 is reserved for configuration accesses */
#define RC_REGION_0_ADDR_TRANS_H	0x00000000
#define RC_REGION_0_ADDR_TRANS_L	0x00000000
#define RC_REGION_0_PASS_BITS		(25 - 1)
#define RC_REGION_0_TYPE_MASK		GENMASK(3, 0)
#define MAX_AXI_WRAPPER_REGION_NUM	33
/* Per-controller state for the Rockchip AXI PCIe root complex. */
struct rockchip_pcie {
	void	__iomem *reg_base;		/* DT axi-base */
	void	__iomem *apb_base;		/* DT apb-base */
	bool	legacy_phy;			/* one legacy PHY instead of per-lane PHYs */
	struct	phy *phys[MAX_LANE_NUM];	/* per-lane PHYs (or phys[0] only if legacy) */
	struct	reset_control *core_rst;
	struct	reset_control *mgmt_rst;
	struct	reset_control *mgmt_sticky_rst;
	struct	reset_control *pipe_rst;
	struct	reset_control *pm_rst;
	struct	reset_control *aclk_rst;
	struct	reset_control *pclk_rst;
	struct	clk *aclk_pcie;
	struct	clk *aclk_perf_pcie;
	struct	clk *hclk_pcie;
	struct	clk *clk_pcie_pm;
	struct	regulator *vpcie12v; /* 12V power supply */
	struct	regulator *vpcie3v3; /* 3.3V power supply */
	struct	regulator *vpcie1v8; /* 1.8V power supply */
	struct	regulator *vpcie0v9; /* 0.9V power supply */
	struct	gpio_desc *ep_gpio;	/* endpoint PERST# control */
	u32	lanes;			/* lane count requested via DT */
	u8	lanes_map;		/* bitmap of lanes actually in use after training */
	u8	root_bus_nr;
	int	link_gen;		/* 1 or 2; maximum link speed to train to */
	struct	device *dev;
	struct	irq_domain *irq_domain;	/* legacy INTx domain */
	int	offset;
	struct	pci_bus *root_bus;
	struct	resource *io;
	phys_addr_t io_bus_addr;
	u32	io_size;
	void	__iomem *msg_region;	/* mapped region used to send PM messages */
	u32	mem_size;
	phys_addr_t msg_bus_addr;
	phys_addr_t mem_bus_addr;
};
  233. static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
  234. {
  235. return readl(rockchip->apb_base + reg);
  236. }
  237. static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
  238. u32 reg)
  239. {
  240. writel(val, rockchip->apb_base + reg);
  241. }
  242. static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
  243. {
  244. u32 status;
  245. status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
  246. status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
  247. rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
  248. }
  249. static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
  250. {
  251. u32 status;
  252. status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
  253. status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
  254. rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
  255. }
  256. static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
  257. {
  258. u32 val;
  259. /* Update Tx credit maximum update interval */
  260. val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
  261. val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
  262. val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
  263. rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
  264. }
  265. static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
  266. struct pci_bus *bus, int dev)
  267. {
  268. /* access only one slot on each root port */
  269. if (bus->number == rockchip->root_bus_nr && dev > 0)
  270. return 0;
  271. /*
  272. * do not read more than one device on the bus directly attached
  273. * to RC's downstream side.
  274. */
  275. if (bus->primary == rockchip->root_bus_nr && dev > 0)
  276. return 0;
  277. return 1;
  278. }
  279. static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
  280. {
  281. u32 val;
  282. u8 map;
  283. if (rockchip->legacy_phy)
  284. return GENMASK(MAX_LANE_NUM - 1, 0);
  285. val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
  286. map = val & PCIE_CORE_LANE_MAP_MASK;
  287. /* The link may be using a reverse-indexed mapping. */
  288. if (val & PCIE_CORE_LANE_MAP_REVERSE)
  289. map = bitrev8(map) >> 4;
  290. return map;
  291. }
  292. static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
  293. int where, int size, u32 *val)
  294. {
  295. void __iomem *addr;
  296. addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
  297. if (!IS_ALIGNED((uintptr_t)addr, size)) {
  298. *val = 0;
  299. return PCIBIOS_BAD_REGISTER_NUMBER;
  300. }
  301. if (size == 4) {
  302. *val = readl(addr);
  303. } else if (size == 2) {
  304. *val = readw(addr);
  305. } else if (size == 1) {
  306. *val = readb(addr);
  307. } else {
  308. *val = 0;
  309. return PCIBIOS_BAD_REGISTER_NUMBER;
  310. }
  311. return PCIBIOS_SUCCESSFUL;
  312. }
  313. static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
  314. int where, int size, u32 val)
  315. {
  316. u32 mask, tmp, offset;
  317. void __iomem *addr;
  318. offset = where & ~0x3;
  319. addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
  320. if (size == 4) {
  321. writel(val, addr);
  322. return PCIBIOS_SUCCESSFUL;
  323. }
  324. mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
  325. /*
  326. * N.B. This read/modify/write isn't safe in general because it can
  327. * corrupt RW1C bits in adjacent registers. But the hardware
  328. * doesn't support smaller writes.
  329. */
  330. tmp = readl(addr) & mask;
  331. tmp |= val << ((where & 0x3) * 8);
  332. writel(tmp, addr);
  333. return PCIBIOS_SUCCESSFUL;
  334. }
  335. static void rockchip_pcie_cfg_configuration_accesses(
  336. struct rockchip_pcie *rockchip, u32 type)
  337. {
  338. u32 ob_desc_0;
  339. /* Configuration Accesses for region 0 */
  340. rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
  341. rockchip_pcie_write(rockchip,
  342. (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
  343. PCIE_CORE_OB_REGION_ADDR0);
  344. rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
  345. PCIE_CORE_OB_REGION_ADDR1);
  346. ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
  347. ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
  348. ob_desc_0 |= (type | (0x1 << 23));
  349. rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
  350. rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
  351. }
  352. static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
  353. struct pci_bus *bus, u32 devfn,
  354. int where, int size, u32 *val)
  355. {
  356. u32 busdev;
  357. busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
  358. PCI_FUNC(devfn), where);
  359. if (!IS_ALIGNED(busdev, size)) {
  360. *val = 0;
  361. return PCIBIOS_BAD_REGISTER_NUMBER;
  362. }
  363. if (bus->parent->number == rockchip->root_bus_nr)
  364. rockchip_pcie_cfg_configuration_accesses(rockchip,
  365. AXI_WRAPPER_TYPE0_CFG);
  366. else
  367. rockchip_pcie_cfg_configuration_accesses(rockchip,
  368. AXI_WRAPPER_TYPE1_CFG);
  369. if (size == 4) {
  370. *val = readl(rockchip->reg_base + busdev);
  371. } else if (size == 2) {
  372. *val = readw(rockchip->reg_base + busdev);
  373. } else if (size == 1) {
  374. *val = readb(rockchip->reg_base + busdev);
  375. } else {
  376. *val = 0;
  377. return PCIBIOS_BAD_REGISTER_NUMBER;
  378. }
  379. return PCIBIOS_SUCCESSFUL;
  380. }
  381. static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
  382. struct pci_bus *bus, u32 devfn,
  383. int where, int size, u32 val)
  384. {
  385. u32 busdev;
  386. busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
  387. PCI_FUNC(devfn), where);
  388. if (!IS_ALIGNED(busdev, size))
  389. return PCIBIOS_BAD_REGISTER_NUMBER;
  390. if (bus->parent->number == rockchip->root_bus_nr)
  391. rockchip_pcie_cfg_configuration_accesses(rockchip,
  392. AXI_WRAPPER_TYPE0_CFG);
  393. else
  394. rockchip_pcie_cfg_configuration_accesses(rockchip,
  395. AXI_WRAPPER_TYPE1_CFG);
  396. if (size == 4)
  397. writel(val, rockchip->reg_base + busdev);
  398. else if (size == 2)
  399. writew(val, rockchip->reg_base + busdev);
  400. else if (size == 1)
  401. writeb(val, rockchip->reg_base + busdev);
  402. else
  403. return PCIBIOS_BAD_REGISTER_NUMBER;
  404. return PCIBIOS_SUCCESSFUL;
  405. }
  406. static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  407. int size, u32 *val)
  408. {
  409. struct rockchip_pcie *rockchip = bus->sysdata;
  410. if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
  411. *val = 0xffffffff;
  412. return PCIBIOS_DEVICE_NOT_FOUND;
  413. }
  414. if (bus->number == rockchip->root_bus_nr)
  415. return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
  416. return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
  417. }
  418. static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  419. int where, int size, u32 val)
  420. {
  421. struct rockchip_pcie *rockchip = bus->sysdata;
  422. if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
  423. return PCIBIOS_DEVICE_NOT_FOUND;
  424. if (bus->number == rockchip->root_bus_nr)
  425. return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
  426. return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
  427. }
/* Config-space accessors handed to the PCI core. */
static struct pci_ops rockchip_pcie_ops = {
	.read = rockchip_pcie_rd_conf,
	.write = rockchip_pcie_wr_conf,
};
  432. static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
  433. {
  434. int curr;
  435. u32 status, scale, power;
  436. if (IS_ERR(rockchip->vpcie3v3))
  437. return;
  438. /*
  439. * Set RC's captured slot power limit and scale if
  440. * vpcie3v3 available. The default values are both zero
  441. * which means the software should set these two according
  442. * to the actual power supply.
  443. */
  444. curr = regulator_get_current_limit(rockchip->vpcie3v3);
  445. if (curr <= 0)
  446. return;
  447. scale = 3; /* 0.001x */
  448. curr = curr / 1000; /* convert to mA */
  449. power = (curr * 3300) / 1000; /* milliwatt */
  450. while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
  451. if (!scale) {
  452. dev_warn(rockchip->dev, "invalid power supply\n");
  453. return;
  454. }
  455. scale--;
  456. power = power / 10;
  457. }
  458. status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
  459. status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
  460. (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
  461. rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
  462. }
/**
 * rockchip_pcie_init_port - Initialize hardware
 * @rockchip: PCIe port information
 *
 * Brings the controller out of reset, powers the PHYs, trains the link
 * (gen1, then optionally gen2) and applies the RC configuration fixups.
 * The reset assert/deassert ordering below is hardware-mandated; do not
 * reorder it.
 *
 * Returns 0 on success or a negative errno (PHYs powered off/exited on
 * failure).
 */
static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err, i;
	u32 status;

	/* Hold the endpoint in reset (PERST# low) during bring-up. */
	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);

	err = reset_control_assert(rockchip->aclk_rst);
	if (err) {
		dev_err(dev, "assert aclk_rst err %d\n", err);
		return err;
	}

	err = reset_control_assert(rockchip->pclk_rst);
	if (err) {
		dev_err(dev, "assert pclk_rst err %d\n", err);
		return err;
	}

	err = reset_control_assert(rockchip->pm_rst);
	if (err) {
		dev_err(dev, "assert pm_rst err %d\n", err);
		return err;
	}

	for (i = 0; i < MAX_LANE_NUM; i++) {
		err = phy_init(rockchip->phys[i]);
		if (err) {
			dev_err(dev, "init phy%d err %d\n", i, err);
			goto err_exit_phy;
		}
	}

	err = reset_control_assert(rockchip->core_rst);
	if (err) {
		dev_err(dev, "assert core_rst err %d\n", err);
		goto err_exit_phy;
	}

	err = reset_control_assert(rockchip->mgmt_rst);
	if (err) {
		dev_err(dev, "assert mgmt_rst err %d\n", err);
		goto err_exit_phy;
	}

	err = reset_control_assert(rockchip->mgmt_sticky_rst);
	if (err) {
		dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
		goto err_exit_phy;
	}

	err = reset_control_assert(rockchip->pipe_rst);
	if (err) {
		dev_err(dev, "assert pipe_rst err %d\n", err);
		goto err_exit_phy;
	}

	/* Let the asserted resets settle before releasing the bus resets. */
	udelay(10);

	err = reset_control_deassert(rockchip->pm_rst);
	if (err) {
		dev_err(dev, "deassert pm_rst err %d\n", err);
		goto err_exit_phy;
	}

	err = reset_control_deassert(rockchip->aclk_rst);
	if (err) {
		dev_err(dev, "deassert aclk_rst err %d\n", err);
		goto err_exit_phy;
	}

	err = reset_control_deassert(rockchip->pclk_rst);
	if (err) {
		dev_err(dev, "deassert pclk_rst err %d\n", err);
		goto err_exit_phy;
	}

	/* Select the maximum link speed before enabling the client. */
	if (rockchip->link_gen == 2)
		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
				    PCIE_CLIENT_CONFIG);
	else
		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
				    PCIE_CLIENT_CONFIG);

	rockchip_pcie_write(rockchip,
			    PCIE_CLIENT_CONF_ENABLE |
			    PCIE_CLIENT_LINK_TRAIN_ENABLE |
			    PCIE_CLIENT_ARI_ENABLE |
			    PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
			    PCIE_CLIENT_MODE_RC,
			    PCIE_CLIENT_CONFIG);

	for (i = 0; i < MAX_LANE_NUM; i++) {
		err = phy_power_on(rockchip->phys[i]);
		if (err) {
			dev_err(dev, "power on phy%d err %d\n", i, err);
			goto err_power_off_phy;
		}
	}

	/*
	 * Please don't reorder the deassert sequence of the following
	 * four reset pins.
	 */
	err = reset_control_deassert(rockchip->mgmt_sticky_rst);
	if (err) {
		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
		goto err_power_off_phy;
	}

	err = reset_control_deassert(rockchip->core_rst);
	if (err) {
		dev_err(dev, "deassert core_rst err %d\n", err);
		goto err_power_off_phy;
	}

	err = reset_control_deassert(rockchip->mgmt_rst);
	if (err) {
		dev_err(dev, "deassert mgmt_rst err %d\n", err);
		goto err_power_off_phy;
	}

	err = reset_control_deassert(rockchip->pipe_rst);
	if (err) {
		dev_err(dev, "deassert pipe_rst err %d\n", err);
		goto err_power_off_phy;
	}

	/* Fix the transmitted FTS count desired to exit from L0s. */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);

	rockchip_pcie_set_power_limit(rockchip);

	/* Set RC's clock architecture as common clock */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKSTA_SLC << 16;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Set RC's RCB to 128 */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKCTL_RCB;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Enable Gen1 training */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	/* Release the endpoint from reset so it can train. */
	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);

	/* 500ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
				 status, PCIE_LINK_UP(status), 20,
				 500 * USEC_PER_MSEC);
	if (err) {
		dev_err(dev, "PCIe link training gen1 timeout!\n");
		goto err_power_off_phy;
	}

	if (rockchip->link_gen == 2) {
		/*
		 * Enable retrain for gen2. This should be configured only after
		 * gen1 finished.
		 */
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
		status |= PCI_EXP_LNKCTL_RL;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
					 status, PCIE_LINK_IS_GEN2(status), 20,
					 500 * USEC_PER_MSEC);
		if (err)
			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
	}

	/* Check the final link width from negotiated lane counter from MGMT */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
			 PCIE_CORE_PL_CONF_LANE_SHIFT);
	dev_dbg(dev, "current link width is x%d\n", status);

	/* Power off unused lane(s) */
	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
	for (i = 0; i < MAX_LANE_NUM; i++) {
		if (!(rockchip->lanes_map & BIT(i))) {
			dev_dbg(dev, "idling lane %d\n", i);
			phy_power_off(rockchip->phys[i]);
		}
	}

	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
			    PCIE_CORE_CONFIG_VENDOR);
	rockchip_pcie_write(rockchip,
			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
			    PCIE_RC_CONFIG_RID_CCR);

	/* Clear THP cap's next cap pointer to remove L1 substate cap */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);

	/* Clear L0s from RC's link cap */
	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
	}

	/* Advertise a max payload size of 256 bytes. */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);

	return 0;
err_power_off_phy:
	while (i--)
		phy_power_off(rockchip->phys[i]);
	i = MAX_LANE_NUM;
err_exit_phy:
	while (i--)
		phy_exit(rockchip->phys[i]);
	return err;
}
  657. static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
  658. {
  659. int i;
  660. for (i = 0; i < MAX_LANE_NUM; i++) {
  661. /* inactive lanes are already powered off */
  662. if (rockchip->lanes_map & BIT(i))
  663. phy_power_off(rockchip->phys[i]);
  664. phy_exit(rockchip->phys[i]);
  665. }
  666. }
  667. static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
  668. {
  669. struct rockchip_pcie *rockchip = arg;
  670. struct device *dev = rockchip->dev;
  671. u32 reg;
  672. u32 sub_reg;
  673. reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
  674. if (reg & PCIE_CLIENT_INT_LOCAL) {
  675. dev_dbg(dev, "local interrupt received\n");
  676. sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
  677. if (sub_reg & PCIE_CORE_INT_PRFPE)
  678. dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
  679. if (sub_reg & PCIE_CORE_INT_CRFPE)
  680. dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
  681. if (sub_reg & PCIE_CORE_INT_RRPE)
  682. dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
  683. if (sub_reg & PCIE_CORE_INT_PRFO)
  684. dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
  685. if (sub_reg & PCIE_CORE_INT_CRFO)
  686. dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
  687. if (sub_reg & PCIE_CORE_INT_RT)
  688. dev_dbg(dev, "replay timer timed out\n");
  689. if (sub_reg & PCIE_CORE_INT_RTR)
  690. dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
  691. if (sub_reg & PCIE_CORE_INT_PE)
  692. dev_dbg(dev, "phy error detected on receive side\n");
  693. if (sub_reg & PCIE_CORE_INT_MTR)
  694. dev_dbg(dev, "malformed TLP received from the link\n");
  695. if (sub_reg & PCIE_CORE_INT_UCR)
  696. dev_dbg(dev, "malformed TLP received from the link\n");
  697. if (sub_reg & PCIE_CORE_INT_FCE)
  698. dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
  699. if (sub_reg & PCIE_CORE_INT_CT)
  700. dev_dbg(dev, "a request timed out waiting for completion\n");
  701. if (sub_reg & PCIE_CORE_INT_UTC)
  702. dev_dbg(dev, "unmapped TC error\n");
  703. if (sub_reg & PCIE_CORE_INT_MMVC)
  704. dev_dbg(dev, "MSI mask register changes\n");
  705. rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
  706. } else if (reg & PCIE_CLIENT_INT_PHY) {
  707. dev_dbg(dev, "phy link changes\n");
  708. rockchip_pcie_update_txcredit_mui(rockchip);
  709. rockchip_pcie_clr_bw_int(rockchip);
  710. }
  711. rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
  712. PCIE_CLIENT_INT_STATUS);
  713. return IRQ_HANDLED;
  714. }
  715. static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
  716. {
  717. struct rockchip_pcie *rockchip = arg;
  718. struct device *dev = rockchip->dev;
  719. u32 reg;
  720. reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
  721. if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
  722. dev_dbg(dev, "legacy done interrupt received\n");
  723. if (reg & PCIE_CLIENT_INT_MSG)
  724. dev_dbg(dev, "message done interrupt received\n");
  725. if (reg & PCIE_CLIENT_INT_HOT_RST)
  726. dev_dbg(dev, "hot reset interrupt received\n");
  727. if (reg & PCIE_CLIENT_INT_DPA)
  728. dev_dbg(dev, "dpa interrupt received\n");
  729. if (reg & PCIE_CLIENT_INT_FATAL_ERR)
  730. dev_dbg(dev, "fatal error interrupt received\n");
  731. if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
  732. dev_dbg(dev, "no fatal error interrupt received\n");
  733. if (reg & PCIE_CLIENT_INT_CORR_ERR)
  734. dev_dbg(dev, "correctable error interrupt received\n");
  735. if (reg & PCIE_CLIENT_INT_PHY)
  736. dev_dbg(dev, "phy interrupt received\n");
  737. rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
  738. PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
  739. PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
  740. PCIE_CLIENT_INT_NFATAL_ERR |
  741. PCIE_CLIENT_INT_CORR_ERR |
  742. PCIE_CLIENT_INT_PHY),
  743. PCIE_CLIENT_INT_STATUS);
  744. return IRQ_HANDLED;
  745. }
  746. static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
  747. {
  748. struct irq_chip *chip = irq_desc_get_chip(desc);
  749. struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
  750. struct device *dev = rockchip->dev;
  751. u32 reg;
  752. u32 hwirq;
  753. u32 virq;
  754. chained_irq_enter(chip, desc);
  755. reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
  756. reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
  757. while (reg) {
  758. hwirq = ffs(reg) - 1;
  759. reg &= ~BIT(hwirq);
  760. virq = irq_find_mapping(rockchip->irq_domain, hwirq);
  761. if (virq)
  762. generic_handle_irq(virq);
  763. else
  764. dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
  765. }
  766. chained_irq_exit(chip, desc);
  767. }
  768. static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
  769. {
  770. struct device *dev = rockchip->dev;
  771. struct phy *phy;
  772. char *name;
  773. u32 i;
  774. phy = devm_phy_get(dev, "pcie-phy");
  775. if (!IS_ERR(phy)) {
  776. rockchip->legacy_phy = true;
  777. rockchip->phys[0] = phy;
  778. dev_warn(dev, "legacy phy model is deprecated!\n");
  779. return 0;
  780. }
  781. if (PTR_ERR(phy) == -EPROBE_DEFER)
  782. return PTR_ERR(phy);
  783. dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n");
  784. for (i = 0; i < MAX_LANE_NUM; i++) {
  785. name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i);
  786. if (!name)
  787. return -ENOMEM;
  788. phy = devm_of_phy_get(dev, dev->of_node, name);
  789. kfree(name);
  790. if (IS_ERR(phy)) {
  791. if (PTR_ERR(phy) != -EPROBE_DEFER)
  792. dev_err(dev, "missing phy for lane %d: %ld\n",
  793. i, PTR_ERR(phy));
  794. return PTR_ERR(phy);
  795. }
  796. rockchip->phys[i] = phy;
  797. }
  798. return 0;
  799. }
  800. static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
  801. {
  802. int irq, err;
  803. struct device *dev = rockchip->dev;
  804. struct platform_device *pdev = to_platform_device(dev);
  805. irq = platform_get_irq_byname(pdev, "sys");
  806. if (irq < 0) {
  807. dev_err(dev, "missing sys IRQ resource\n");
  808. return irq;
  809. }
  810. err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
  811. IRQF_SHARED, "pcie-sys", rockchip);
  812. if (err) {
  813. dev_err(dev, "failed to request PCIe subsystem IRQ\n");
  814. return err;
  815. }
  816. irq = platform_get_irq_byname(pdev, "legacy");
  817. if (irq < 0) {
  818. dev_err(dev, "missing legacy IRQ resource\n");
  819. return irq;
  820. }
  821. irq_set_chained_handler_and_data(irq,
  822. rockchip_pcie_legacy_int_handler,
  823. rockchip);
  824. irq = platform_get_irq_byname(pdev, "client");
  825. if (irq < 0) {
  826. dev_err(dev, "missing client IRQ resource\n");
  827. return irq;
  828. }
  829. err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
  830. IRQF_SHARED, "pcie-client", rockchip);
  831. if (err) {
  832. dev_err(dev, "failed to request PCIe client IRQ\n");
  833. return err;
  834. }
  835. return 0;
  836. }
  837. /**
  838. * rockchip_pcie_parse_dt - Parse Device Tree
  839. * @rockchip: PCIe port information
  840. *
  841. * Return: '0' on success and error value on failure
  842. */
  843. static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
  844. {
  845. struct device *dev = rockchip->dev;
  846. struct platform_device *pdev = to_platform_device(dev);
  847. struct device_node *node = dev->of_node;
  848. struct resource *regs;
  849. int err;
  850. regs = platform_get_resource_byname(pdev,
  851. IORESOURCE_MEM,
  852. "axi-base");
  853. rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
  854. if (IS_ERR(rockchip->reg_base))
  855. return PTR_ERR(rockchip->reg_base);
  856. regs = platform_get_resource_byname(pdev,
  857. IORESOURCE_MEM,
  858. "apb-base");
  859. rockchip->apb_base = devm_ioremap_resource(dev, regs);
  860. if (IS_ERR(rockchip->apb_base))
  861. return PTR_ERR(rockchip->apb_base);
  862. err = rockchip_pcie_get_phys(rockchip);
  863. if (err)
  864. return err;
  865. rockchip->lanes = 1;
  866. err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
  867. if (!err && (rockchip->lanes == 0 ||
  868. rockchip->lanes == 3 ||
  869. rockchip->lanes > 4)) {
  870. dev_warn(dev, "invalid num-lanes, default to use one lane\n");
  871. rockchip->lanes = 1;
  872. }
  873. rockchip->link_gen = of_pci_get_max_link_speed(node);
  874. if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
  875. rockchip->link_gen = 2;
  876. rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
  877. if (IS_ERR(rockchip->core_rst)) {
  878. if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
  879. dev_err(dev, "missing core reset property in node\n");
  880. return PTR_ERR(rockchip->core_rst);
  881. }
  882. rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
  883. if (IS_ERR(rockchip->mgmt_rst)) {
  884. if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
  885. dev_err(dev, "missing mgmt reset property in node\n");
  886. return PTR_ERR(rockchip->mgmt_rst);
  887. }
  888. rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
  889. "mgmt-sticky");
  890. if (IS_ERR(rockchip->mgmt_sticky_rst)) {
  891. if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
  892. dev_err(dev, "missing mgmt-sticky reset property in node\n");
  893. return PTR_ERR(rockchip->mgmt_sticky_rst);
  894. }
  895. rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
  896. if (IS_ERR(rockchip->pipe_rst)) {
  897. if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
  898. dev_err(dev, "missing pipe reset property in node\n");
  899. return PTR_ERR(rockchip->pipe_rst);
  900. }
  901. rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
  902. if (IS_ERR(rockchip->pm_rst)) {
  903. if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
  904. dev_err(dev, "missing pm reset property in node\n");
  905. return PTR_ERR(rockchip->pm_rst);
  906. }
  907. rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
  908. if (IS_ERR(rockchip->pclk_rst)) {
  909. if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
  910. dev_err(dev, "missing pclk reset property in node\n");
  911. return PTR_ERR(rockchip->pclk_rst);
  912. }
  913. rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
  914. if (IS_ERR(rockchip->aclk_rst)) {
  915. if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
  916. dev_err(dev, "missing aclk reset property in node\n");
  917. return PTR_ERR(rockchip->aclk_rst);
  918. }
  919. rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
  920. if (IS_ERR(rockchip->ep_gpio)) {
  921. dev_err(dev, "missing ep-gpios property in node\n");
  922. return PTR_ERR(rockchip->ep_gpio);
  923. }
  924. rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
  925. if (IS_ERR(rockchip->aclk_pcie)) {
  926. dev_err(dev, "aclk clock not found\n");
  927. return PTR_ERR(rockchip->aclk_pcie);
  928. }
  929. rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
  930. if (IS_ERR(rockchip->aclk_perf_pcie)) {
  931. dev_err(dev, "aclk_perf clock not found\n");
  932. return PTR_ERR(rockchip->aclk_perf_pcie);
  933. }
  934. rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
  935. if (IS_ERR(rockchip->hclk_pcie)) {
  936. dev_err(dev, "hclk clock not found\n");
  937. return PTR_ERR(rockchip->hclk_pcie);
  938. }
  939. rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
  940. if (IS_ERR(rockchip->clk_pcie_pm)) {
  941. dev_err(dev, "pm clock not found\n");
  942. return PTR_ERR(rockchip->clk_pcie_pm);
  943. }
  944. err = rockchip_pcie_setup_irq(rockchip);
  945. if (err)
  946. return err;
  947. rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
  948. if (IS_ERR(rockchip->vpcie12v)) {
  949. if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
  950. return -EPROBE_DEFER;
  951. dev_info(dev, "no vpcie12v regulator found\n");
  952. }
  953. rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
  954. if (IS_ERR(rockchip->vpcie3v3)) {
  955. if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
  956. return -EPROBE_DEFER;
  957. dev_info(dev, "no vpcie3v3 regulator found\n");
  958. }
  959. rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
  960. if (IS_ERR(rockchip->vpcie1v8)) {
  961. if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
  962. return -EPROBE_DEFER;
  963. dev_info(dev, "no vpcie1v8 regulator found\n");
  964. }
  965. rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
  966. if (IS_ERR(rockchip->vpcie0v9)) {
  967. if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
  968. return -EPROBE_DEFER;
  969. dev_info(dev, "no vpcie0v9 regulator found\n");
  970. }
  971. return 0;
  972. }
  973. static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
  974. {
  975. struct device *dev = rockchip->dev;
  976. int err;
  977. if (!IS_ERR(rockchip->vpcie12v)) {
  978. err = regulator_enable(rockchip->vpcie12v);
  979. if (err) {
  980. dev_err(dev, "fail to enable vpcie12v regulator\n");
  981. goto err_out;
  982. }
  983. }
  984. if (!IS_ERR(rockchip->vpcie3v3)) {
  985. err = regulator_enable(rockchip->vpcie3v3);
  986. if (err) {
  987. dev_err(dev, "fail to enable vpcie3v3 regulator\n");
  988. goto err_disable_12v;
  989. }
  990. }
  991. if (!IS_ERR(rockchip->vpcie1v8)) {
  992. err = regulator_enable(rockchip->vpcie1v8);
  993. if (err) {
  994. dev_err(dev, "fail to enable vpcie1v8 regulator\n");
  995. goto err_disable_3v3;
  996. }
  997. }
  998. if (!IS_ERR(rockchip->vpcie0v9)) {
  999. err = regulator_enable(rockchip->vpcie0v9);
  1000. if (err) {
  1001. dev_err(dev, "fail to enable vpcie0v9 regulator\n");
  1002. goto err_disable_1v8;
  1003. }
  1004. }
  1005. return 0;
  1006. err_disable_1v8:
  1007. if (!IS_ERR(rockchip->vpcie1v8))
  1008. regulator_disable(rockchip->vpcie1v8);
  1009. err_disable_3v3:
  1010. if (!IS_ERR(rockchip->vpcie3v3))
  1011. regulator_disable(rockchip->vpcie3v3);
  1012. err_disable_12v:
  1013. if (!IS_ERR(rockchip->vpcie12v))
  1014. regulator_disable(rockchip->vpcie12v);
  1015. err_out:
  1016. return err;
  1017. }
  1018. static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
  1019. {
  1020. rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
  1021. (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
  1022. rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
  1023. PCIE_CORE_INT_MASK);
  1024. rockchip_pcie_enable_bw_int(rockchip);
  1025. }
  1026. static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
  1027. irq_hw_number_t hwirq)
  1028. {
  1029. irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
  1030. irq_set_chip_data(irq, domain->host_data);
  1031. return 0;
  1032. }
/* Ops for the INTx IRQ domain; only .map is needed here. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = rockchip_pcie_intx_map,
};
  1036. static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
  1037. {
  1038. struct device *dev = rockchip->dev;
  1039. struct device_node *intc = of_get_next_child(dev->of_node, NULL);
  1040. if (!intc) {
  1041. dev_err(dev, "missing child interrupt-controller node\n");
  1042. return -EINVAL;
  1043. }
  1044. rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
  1045. &intx_domain_ops, rockchip);
  1046. if (!rockchip->irq_domain) {
  1047. dev_err(dev, "failed to get a INTx IRQ domain\n");
  1048. return -EINVAL;
  1049. }
  1050. return 0;
  1051. }
  1052. static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
  1053. int region_no, int type, u8 num_pass_bits,
  1054. u32 lower_addr, u32 upper_addr)
  1055. {
  1056. u32 ob_addr_0;
  1057. u32 ob_addr_1;
  1058. u32 ob_desc_0;
  1059. u32 aw_offset;
  1060. if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
  1061. return -EINVAL;
  1062. if (num_pass_bits + 1 < 8)
  1063. return -EINVAL;
  1064. if (num_pass_bits > 63)
  1065. return -EINVAL;
  1066. if (region_no == 0) {
  1067. if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
  1068. return -EINVAL;
  1069. }
  1070. if (region_no != 0) {
  1071. if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
  1072. return -EINVAL;
  1073. }
  1074. aw_offset = (region_no << OB_REG_SIZE_SHIFT);
  1075. ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
  1076. ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
  1077. ob_addr_1 = upper_addr;
  1078. ob_desc_0 = (1 << 23 | type);
  1079. rockchip_pcie_write(rockchip, ob_addr_0,
  1080. PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
  1081. rockchip_pcie_write(rockchip, ob_addr_1,
  1082. PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
  1083. rockchip_pcie_write(rockchip, ob_desc_0,
  1084. PCIE_CORE_OB_REGION_DESC0 + aw_offset);
  1085. rockchip_pcie_write(rockchip, 0,
  1086. PCIE_CORE_OB_REGION_DESC1 + aw_offset);
  1087. return 0;
  1088. }
  1089. static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
  1090. int region_no, u8 num_pass_bits,
  1091. u32 lower_addr, u32 upper_addr)
  1092. {
  1093. u32 ib_addr_0;
  1094. u32 ib_addr_1;
  1095. u32 aw_offset;
  1096. if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
  1097. return -EINVAL;
  1098. if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
  1099. return -EINVAL;
  1100. if (num_pass_bits > 63)
  1101. return -EINVAL;
  1102. aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
  1103. ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
  1104. ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
  1105. ib_addr_1 = upper_addr;
  1106. rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
  1107. rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
  1108. return 0;
  1109. }
/*
 * Program the address translation units: type0 config accesses, one 1M
 * outbound region per megabyte of MEM and IO space (region 0 is used for
 * config), one inbound region covering the full 32-bit space, and a final
 * outbound region for outgoing PCIe messages.
 */
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int offset;
	int err;
	int reg_no;

	rockchip_pcie_cfg_configuration_accesses(rockchip,
						 AXI_WRAPPER_TYPE0_CFG);

	/* MEM windows: regions 1..N, each passing 20 bits (1M) through. */
	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
						AXI_WRAPPER_MEM_WRITE,
						20 - 1,
						rockchip->mem_bus_addr +
						(reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC mem outbound ATU failed\n");
			return err;
		}
	}

	/* Inbound: pass the whole 32-bit address space through untranslated. */
	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
	if (err) {
		dev_err(dev, "program RC mem inbound ATU failed\n");
		return err;
	}

	/* IO windows start right after the MEM windows. */
	offset = rockchip->mem_size >> 20;
	for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip,
						reg_no + 1 + offset,
						AXI_WRAPPER_IO_WRITE,
						20 - 1,
						rockchip->io_bus_addr +
						(reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC io outbound ATU failed\n");
			return err;
		}
	}

	/* assign message regions */
	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
				  AXI_WRAPPER_NOR_MSG,
				  20 - 1, 0, 0);

	/* CPU-visible address of the message window, used for PME_TURN_OFF. */
	rockchip->msg_bus_addr = rockchip->mem_bus_addr +
				 ((reg_no + offset) << 20);
	return err;
}
  1157. static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
  1158. {
  1159. u32 value;
  1160. int err;
  1161. /* send PME_TURN_OFF message */
  1162. writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
  1163. /* read LTSSM and wait for falling into L2 link state */
  1164. err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
  1165. value, PCIE_LINK_IS_L2(value), 20,
  1166. jiffies_to_usecs(5 * HZ));
  1167. if (err) {
  1168. dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
  1169. return err;
  1170. }
  1171. return 0;
  1172. }
  1173. static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
  1174. {
  1175. struct device *dev = rockchip->dev;
  1176. int err;
  1177. err = clk_prepare_enable(rockchip->aclk_pcie);
  1178. if (err) {
  1179. dev_err(dev, "unable to enable aclk_pcie clock\n");
  1180. return err;
  1181. }
  1182. err = clk_prepare_enable(rockchip->aclk_perf_pcie);
  1183. if (err) {
  1184. dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
  1185. goto err_aclk_perf_pcie;
  1186. }
  1187. err = clk_prepare_enable(rockchip->hclk_pcie);
  1188. if (err) {
  1189. dev_err(dev, "unable to enable hclk_pcie clock\n");
  1190. goto err_hclk_pcie;
  1191. }
  1192. err = clk_prepare_enable(rockchip->clk_pcie_pm);
  1193. if (err) {
  1194. dev_err(dev, "unable to enable clk_pcie_pm clock\n");
  1195. goto err_clk_pcie_pm;
  1196. }
  1197. return 0;
  1198. err_clk_pcie_pm:
  1199. clk_disable_unprepare(rockchip->hclk_pcie);
  1200. err_hclk_pcie:
  1201. clk_disable_unprepare(rockchip->aclk_perf_pcie);
  1202. err_aclk_perf_pcie:
  1203. clk_disable_unprepare(rockchip->aclk_pcie);
  1204. return err;
  1205. }
/*
 * Disable the bus clocks in strict reverse of rockchip_pcie_enable_clocks().
 * Takes void* — NOTE(review): the signature suggests it also serves as a
 * cleanup-callback; confirm against the callers.
 */
static void rockchip_pcie_disable_clocks(void *data)
{
	struct rockchip_pcie *rockchip = data;

	clk_disable_unprepare(rockchip->clk_pcie_pm);
	clk_disable_unprepare(rockchip->hclk_pcie);
	clk_disable_unprepare(rockchip->aclk_perf_pcie);
	clk_disable_unprepare(rockchip->aclk_pcie);
}
/*
 * System-sleep noirq suspend: mask interrupts, drive the link into L2,
 * then power down PHYs, clocks and the 0.9V rail.  If L2 entry times out
 * the interrupts are restored and the suspend is aborted.
 */
static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int ret;

	/* disable core and cli int since we don't need to ack PME_ACK */
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);

	ret = rockchip_pcie_wait_l2(rockchip);
	if (ret) {
		/* Could not reach L2: re-enable interrupts and fail suspend. */
		rockchip_pcie_enable_interrupts(rockchip);
		return ret;
	}

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);

	return ret;
}
/*
 * System-sleep noirq resume: mirror of suspend — re-enable the 0.9V rail
 * and clocks, retrain the link, reprogram the ATUs and restore interrupts.
 * Each failure unwinds exactly what was brought up before it.
 */
static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int err;

	if (!IS_ERR(rockchip->vpcie0v9)) {
		err = regulator_enable(rockchip->vpcie0v9);
		if (err) {
			dev_err(dev, "fail to enable vpcie0v9 regulator\n");
			return err;
		}
	}

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		goto err_disable_0v9;

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		goto err_pcie_resume;

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		goto err_err_deinit_port;

	/* Need this to enter L1 again */
	rockchip_pcie_update_txcredit_mui(rockchip);
	rockchip_pcie_enable_interrupts(rockchip);

	return 0;

err_err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_pcie_resume:
	rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);
	return err;
}
/*
 * Probe: parse DT, bring up power/clocks and the port, create the INTx
 * domain, claim and map the host bridge windows, program the ATUs, then
 * register and scan the root bus.  The error labels unwind in reverse
 * order of acquisition.
 */
static int rockchip_pcie_probe(struct platform_device *pdev)
{
	struct rockchip_pcie *rockchip;
	struct device *dev = &pdev->dev;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource_entry *win;
	resource_size_t io_base;
	struct resource *mem;
	struct resource *io;
	int err;

	LIST_HEAD(res);

	if (!dev->of_node)
		return -ENODEV;

	/* Driver state lives in the bridge's private area (zero-initialized). */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
	if (!bridge)
		return -ENOMEM;

	rockchip = pci_host_bridge_priv(bridge);

	platform_set_drvdata(pdev, rockchip);
	rockchip->dev = dev;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_set_vpcie(rockchip);
	if (err) {
		dev_err(dev, "failed to set vpcie regulator\n");
		goto err_set_vpcie;
	}

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		goto err_vpcie;

	rockchip_pcie_enable_interrupts(rockchip);

	err = rockchip_pcie_init_irq_domain(rockchip);
	if (err < 0)
		goto err_deinit_port;

	err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
					       &res, &io_base);
	if (err)
		goto err_remove_irq_domain;

	err = devm_request_pci_bus_resources(dev, &res);
	if (err)
		goto err_free_res;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			io = win->res;
			io->name = "I/O";
			rockchip->io_size = resource_size(io);
			rockchip->io_bus_addr = io->start - win->offset;
			err = pci_remap_iospace(io, io_base);
			if (err) {
				/* Non-fatal: continue without the IO window. */
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, io);
				continue;
			}
			rockchip->io = io;
			break;
		case IORESOURCE_MEM:
			mem = win->res;
			mem->name = "MEM";
			rockchip->mem_size = resource_size(mem);
			rockchip->mem_bus_addr = mem->start - win->offset;
			break;
		case IORESOURCE_BUS:
			rockchip->root_bus_nr = win->res->start;
			break;
		default:
			continue;
		}
	}

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		goto err_unmap_iospace;

	/* Map the message window computed by rockchip_pcie_cfg_atu(). */
	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
	if (!rockchip->msg_region) {
		err = -ENOMEM;
		goto err_unmap_iospace;
	}

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = rockchip;
	bridge->busnr = 0;
	bridge->ops = &rockchip_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(bridge);
	if (err < 0)
		goto err_unmap_iospace;

	bus = bridge->bus;
	rockchip->root_bus = bus;

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

err_unmap_iospace:
	/* NOTE(review): rockchip->io stays NULL when no IO window was mapped
	 * (or mapping was skipped above) — confirm pci_unmap_iospace()
	 * tolerates that on this kernel.
	 */
	pci_unmap_iospace(rockchip->io);
err_free_res:
	pci_free_resource_list(&res);
err_remove_irq_domain:
	irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_vpcie:
	/* Regulators recorded as ERR_PTR were never enabled — skip them. */
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	if (!IS_ERR(rockchip->vpcie1v8))
		regulator_disable(rockchip->vpcie1v8);
	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}
/*
 * Remove: tear down in reverse of probe — stop and remove the root bus,
 * unmap the IO space, destroy the INTx domain, power down the PHYs, stop
 * the clocks and finally disable whatever regulators were present.
 */
static int rockchip_pcie_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);

	pci_stop_root_bus(rockchip->root_bus);
	pci_remove_root_bus(rockchip->root_bus);
	pci_unmap_iospace(rockchip->io);
	irq_domain_remove(rockchip->irq_domain);

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	if (!IS_ERR(rockchip->vpcie1v8))
		regulator_disable(rockchip->vpcie1v8);
	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);

	return 0;
}
/* Noirq-phase sleep ops only; the link must be quiesced before irqs go. */
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
				      rockchip_pcie_resume_noirq)
};

/* Only the RK3399 integration of this controller is supported. */
static const struct of_device_id rockchip_pcie_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie", },
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);

static struct platform_driver rockchip_pcie_driver = {
	.driver = {
		.name = "rockchip-pcie",
		.of_match_table = rockchip_pcie_of_match,
		.pm = &rockchip_pcie_pm_ops,
	},
	.probe = rockchip_pcie_probe,
	.remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Rockchip Inc");
MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
MODULE_LICENSE("GPL v2");