pcie-mediatek.c

// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *	   Honghui Zhang <honghui.zhang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

/* PCIe shared registers */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
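
/*
 * PCIE_CONF_ADDR() packs the config-space register offset (low bits plus an
 * extended-register nibble in bits 27:24), function, device and bus numbers
 * into the value written to PCIE_CFG_ADDR; the data is then accessed through
 * the PCIE_CFG_DATA window. This is the indirect configuration mechanism
 * used by the v1 hosts (see mtk_pcie_map_bus() and mtk_pcie_startup_port()).
 */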
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)

/* PCIe V2 share registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0
#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)
#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0
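
/*
 * CFG_HEADER_DW0/DW1/DW2 assemble the first three DWs of the configuration
 * TLP that the v2 controller transmits on behalf of the host: DW0 carries
 * fmt/type and a one-DW length, DW1 holds the first-DW byte enables derived
 * from the access offset and size, and DW2 encodes the target register,
 * function, device and bus numbers.
 */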
#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))

#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)

struct mtk_pcie_port;

/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @has_msi: whether this host supports MSI interrupts or not
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool has_msi;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	struct irq_domain *irq_domain;
	struct irq_domain *msi_domain;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};

/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};

static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}
}

static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}

static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}

static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
{
	u32 val;
	int err;

	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
					!(val & APP_CFG_REQ), 10,
					100 * USEC_PER_MSEC);
	if (err)
		return PCIBIOS_SET_FAILED;

	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}
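
/*
 * Configuration accesses on the v2 hosts are performed by writing the TLP
 * header DWs to PCIE_CFG_HEADER0..2, setting APP_CFG_REQ so the controller
 * transmits the Cfgrd/Cfgwr TLP, then waiting in mtk_pcie_check_cfg_cpld()
 * for the request bit to clear and for a successful completion status.
 * Read data comes back through PCIE_CFG_RDATA.
 */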
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}

static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
						unsigned int devfn)
{
	struct mtk_pcie *pcie = bus->sysdata;
	struct mtk_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		if (port->slot == PCI_SLOT(devfn))
			return port;

	return NULL;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;
	int ret;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
	if (ret)
		*val = ~0;

	return ret;
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
}

static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
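
/*
 * Bring up one v2 port: optionally enable LTSSM/ASPM L1 in the shared subsys
 * block (needed on MT7622), sequence the PHY/PERST/PIPE/MAC/configuration
 * resets, poll for link up, unmask INTx, and program the AHB-to-PCIe and
 * PCIe-to-AXI translation windows so the memory resource is reachable.
 */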
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct resource *mem = &pcie->mem;
	u32 val;
	size_t size;
	int err;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	/* Set AHB to PCIe translation windows */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space. */
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}

static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
{
	int msi;

	msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
	if (msi < MTK_MSI_IRQS_NUM)
		set_bit(msi, port->msi_irq_in_use);
	else
		return -ENOSPC;

	return msi;
}

static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
{
	clear_bit(hwirq, port->msi_irq_in_use);
}

static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
				  struct pci_dev *pdev, struct msi_desc *desc)
{
	struct mtk_pcie_port *port;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	phys_addr_t msg_addr;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return -EINVAL;

	hwirq = mtk_pcie_msi_alloc(port);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(port->msi_domain, hwirq);
	if (!irq) {
		mtk_pcie_msi_free(port, hwirq);
		return -EINVAL;
	}

	chip->dev = &pdev->dev;

	irq_set_msi_desc(irq, desc);

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg.address_hi = 0;
	msg.address_lo = lower_32_bits(msg_addr);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct pci_dev *pdev = to_pci_dev(chip->dev);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct mtk_pcie_port *port;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return;

	irq_dispose_mapping(irq);
	mtk_pcie_msi_free(port, hwirq);
}

static struct msi_controller mtk_pcie_msi_chip = {
	.setup_irq = mtk_pcie_msi_setup_irq,
	.teardown_irq = mtk_msi_teardown_irq,
};

static struct irq_chip mtk_msi_irq_chip = {
	.name = "MTK PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = mtk_pcie_msi_map,
};
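
/*
 * MSI delivery uses the physical address of the port's PCIE_MSI_VECTOR
 * register as the (32-bit) message address: mtk_pcie_enable_msi() programs
 * that address into PCIE_IMSI_ADDR and unmasks the MSI summary interrupt,
 * while mtk_pcie_msi_setup_irq() above hands the same address and the
 * allocated hwirq number to the endpoint as its MSI message.
 */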
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
				    struct device_node *node)
{
	struct device *dev = port->pcie->dev;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "no PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						 &intx_domain_ops, port);
	if (!port->irq_domain) {
		dev_err(dev, "failed to get INTx IRQ domain\n");
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
							 &msi_domain_ops,
							 &mtk_pcie_msi_chip);
		if (!port->msi_domain) {
			dev_err(dev, "failed to create MSI IRQ domain\n");
			return -ENODEV;
		}
		mtk_pcie_enable_msi(port);
	}

	return 0;
}

static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
{
	struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;

	while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
			unsigned long imsi_status;

			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					/* Clear the MSI */
					writel(1 << bit, port->base + PCIE_IMSI_STATUS);
					virq = irq_find_mapping(port->msi_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	return IRQ_HANDLED;
}

static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
			      struct device_node *node)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err, irq;

	irq = platform_get_irq(pdev, port->slot);
	err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
			       IRQF_SHARED, "mtk-pcie", port);
	if (err) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return err;
	}

	err = mtk_pcie_init_irq_domain(port, node);
	if (err) {
		dev_err(dev, "failed to init PCIe IRQ domain\n");
		return err;
	}

	return 0;
}
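
/*
 * The v1 hosts use a classic indirect configuration mechanism: map_bus()
 * latches the bus/device/function/register address into PCIE_CFG_ADDR and
 * returns a pointer into the PCIE_CFG_DATA window, so the generic
 * pci_generic_config_read()/write() helpers can do the actual access.
 */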
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
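
/*
 * Bring up one v1 port: toggle the per-port PERST_N bit in PCIE_SYS_CFG,
 * poll PCIE_LINK_STATUS for link up, enable the port interrupt, map BAR0 to
 * the whole DDR region, and use the indirect configuration interface to fix
 * up the class code/revision ID and to tune the FC credits and FTS number.
 */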
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}

static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
			       struct device_node *node,
			       int slot)
{
	struct mtk_pcie_port *port;
	struct resource *regs;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	err = of_property_read_u32(node, "num-lanes", &port->lane);
	if (err) {
		dev_err(dev, "missing num-lanes property\n");
		return err;
	}

	snprintf(name, sizeof(name), "port%d", slot);
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", slot);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", slot);
	port->sys_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
		return PTR_ERR(port->sys_ck);
	}

	/* sys_ck might be divided into the following parts in some chips */
	snprintf(name, sizeof(name), "ahb_ck%d", slot);
	port->ahb_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->ahb_ck)) {
		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->ahb_ck = NULL;
	}

	snprintf(name, sizeof(name), "axi_ck%d", slot);
	port->axi_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->axi_ck)) {
		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->axi_ck = NULL;
	}

	snprintf(name, sizeof(name), "aux_ck%d", slot);
	port->aux_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->aux_ck)) {
		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->aux_ck = NULL;
	}

	snprintf(name, sizeof(name), "obff_ck%d", slot);
	port->obff_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->obff_ck)) {
		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->obff_ck = NULL;
	}

	snprintf(name, sizeof(name), "pipe_ck%d", slot);
	port->pipe_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->pipe_ck)) {
		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->pipe_ck = NULL;
	}

	snprintf(name, sizeof(name), "pcie-rst%d", slot);
	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	if (pcie->soc->setup_irq) {
		err = pcie->soc->setup_irq(port, node);
		if (err)
			return err;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base)) {
			dev_err(dev, "failed to map shared register\n");
			return PTR_ERR(pcie->base);
		}
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		pcie->free_ck = NULL;
	}

	if (dev->pm_domain) {
		pm_runtime_enable(dev);
		pm_runtime_get_sync(dev);
	}

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}

	return err;
}
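
/*
 * mtk_pcie_setup() walks the "ranges" property to fill in the I/O and memory
 * resources, parses the bus range, creates one mtk_pcie_port per enabled
 * child node, powers up the shared subsys block, and finally enables each
 * port; ports whose link never comes up are freed, and the whole subsys is
 * powered back down if no port is left.
 */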
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	struct mtk_pcie_port *port, *tmp;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, node, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = node->full_name;

			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			pcie->offset.mem = res.start - range.pci_addr;

			memcpy(&pcie->mem, &res, sizeof(res));
			pcie->mem.name = "non-prefetchable";
			break;
		}
	}

	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	for_each_available_child_of_node(node, child) {
		int slot;

		err = of_pci_get_devfn(child);
		if (err < 0) {
			dev_err(dev, "failed to parse devfn: %d\n", err);
			return err;
		}

		slot = PCI_SLOT(err);

		err = mtk_pcie_parse_port(pcie, child, slot);
		if (err)
			return err;
	}

	err = mtk_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	/* enable each port, and then check link status */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_port(port);

	/* power down PCIe subsys if slots are all empty (link down) */
	if (list_empty(&pcie->ports))
		mtk_pcie_subsys_powerdown(pcie);

	return 0;
}

static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

	pci_remap_iospace(&pcie->pio, pcie->io.start);

	return 0;
}

static int mtk_pcie_register_host(struct pci_host_bridge *host)
{
	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
	struct pci_bus *child;
	int err;

	host->busnr = pcie->busn.start;
	host->dev.parent = pcie->dev;
	host->ops = pcie->soc->ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = pcie;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
		host->msi = &mtk_pcie_msi_chip;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0)
		return err;

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	return 0;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	err = mtk_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	err = mtk_pcie_register_host(host);
	if (err)
		goto put_resources;

	return 0;

put_resources:
	if (!list_empty(&pcie->ports))
		mtk_pcie_put_resources(pcie);

	return err;
}

static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
	.has_msi = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(mtk_pcie_driver);
  962. builtin_platform_driver(mtk_pcie_driver);