/* drivers/pci/controller/pcie-mediatek.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MediaTek PCIe host controller driver.
  4. *
  5. * Copyright (c) 2017 MediaTek Inc.
  6. * Author: Ryder Lee <ryder.lee@mediatek.com>
  7. * Honghui Zhang <honghui.zhang@mediatek.com>
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/iopoll.h>
  12. #include <linux/irq.h>
  13. #include <linux/irqchip/chained_irq.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/kernel.h>
  16. #include <linux/msi.h>
  17. #include <linux/of_address.h>
  18. #include <linux/of_pci.h>
  19. #include <linux/of_platform.h>
  20. #include <linux/pci.h>
  21. #include <linux/phy/phy.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/reset.h>
  25. #include "../pci.h"
  26. /* PCIe shared registers */
  27. #define PCIE_SYS_CFG 0x00
  28. #define PCIE_INT_ENABLE 0x0c
  29. #define PCIE_CFG_ADDR 0x20
  30. #define PCIE_CFG_DATA 0x24
  31. /* PCIe per port registers */
  32. #define PCIE_BAR0_SETUP 0x10
  33. #define PCIE_CLASS 0x34
  34. #define PCIE_LINK_STATUS 0x50
  35. #define PCIE_PORT_INT_EN(x) BIT(20 + (x))
  36. #define PCIE_PORT_PERST(x) BIT(1 + (x))
  37. #define PCIE_PORT_LINKUP BIT(0)
  38. #define PCIE_BAR_MAP_MAX GENMASK(31, 16)
  39. #define PCIE_BAR_ENABLE BIT(0)
  40. #define PCIE_REVISION_ID BIT(0)
  41. #define PCIE_CLASS_CODE (0x60400 << 8)
  42. #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
  43. ((((regn) >> 8) & GENMASK(3, 0)) << 24))
  44. #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
  45. #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
  46. #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
  47. #define PCIE_CONF_ADDR(regn, fun, dev, bus) \
  48. (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
  49. PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
  50. /* MediaTek specific configuration registers */
  51. #define PCIE_FTS_NUM 0x70c
  52. #define PCIE_FTS_NUM_MASK GENMASK(15, 8)
  53. #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
  54. #define PCIE_FC_CREDIT 0x73c
  55. #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
  56. #define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
  57. /* PCIe V2 share registers */
  58. #define PCIE_SYS_CFG_V2 0x0
  59. #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
  60. #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
  61. /* PCIe V2 per-port registers */
  62. #define PCIE_MSI_VECTOR 0x0c0
  63. #define PCIE_CONF_VEND_ID 0x100
  64. #define PCIE_CONF_CLASS_ID 0x106
  65. #define PCIE_INT_MASK 0x420
  66. #define INTX_MASK GENMASK(19, 16)
  67. #define INTX_SHIFT 16
  68. #define PCIE_INT_STATUS 0x424
  69. #define MSI_STATUS BIT(23)
  70. #define PCIE_IMSI_STATUS 0x42c
  71. #define PCIE_IMSI_ADDR 0x430
  72. #define MSI_MASK BIT(23)
  73. #define MTK_MSI_IRQS_NUM 32
  74. #define PCIE_AHB_TRANS_BASE0_L 0x438
  75. #define PCIE_AHB_TRANS_BASE0_H 0x43c
  76. #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
  77. #define PCIE_AXI_WINDOW0 0x448
  78. #define WIN_ENABLE BIT(7)
  79. /* PCIe V2 configuration transaction header */
  80. #define PCIE_CFG_HEADER0 0x460
  81. #define PCIE_CFG_HEADER1 0x464
  82. #define PCIE_CFG_HEADER2 0x468
  83. #define PCIE_CFG_WDATA 0x470
  84. #define PCIE_APP_TLP_REQ 0x488
  85. #define PCIE_CFG_RDATA 0x48c
  86. #define APP_CFG_REQ BIT(0)
  87. #define APP_CPL_STATUS GENMASK(7, 5)
  88. #define CFG_WRRD_TYPE_0 4
  89. #define CFG_WR_FMT 2
  90. #define CFG_RD_FMT 0
  91. #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
  92. #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
  93. #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
  94. #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
  95. #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
  96. #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
  97. #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
  98. #define CFG_HEADER_DW0(type, fmt) \
  99. (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
  100. #define CFG_HEADER_DW1(where, size) \
  101. (GENMASK(((size) - 1), 0) << ((where) & 0x3))
  102. #define CFG_HEADER_DW2(regn, fun, dev, bus) \
  103. (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
  104. CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
  105. #define PCIE_RST_CTRL 0x510
  106. #define PCIE_PHY_RSTB BIT(0)
  107. #define PCIE_PIPE_SRSTB BIT(1)
  108. #define PCIE_MAC_SRSTB BIT(2)
  109. #define PCIE_CRSTB BIT(3)
  110. #define PCIE_PERSTB BIT(8)
  111. #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
  112. #define PCIE_LINK_STATUS_V2 0x804
  113. #define PCIE_PORT_LINKUP_V2 BIT(10)
  114. struct mtk_pcie_port;
  115. /**
  116. * struct mtk_pcie_soc - differentiate between host generations
  117. * @need_fix_class_id: whether this host's class ID needed to be fixed or not
  118. * @ops: pointer to configuration access functions
  119. * @startup: pointer to controller setting functions
  120. * @setup_irq: pointer to initialize IRQ functions
  121. */
  122. struct mtk_pcie_soc {
  123. bool need_fix_class_id;
  124. struct pci_ops *ops;
  125. int (*startup)(struct mtk_pcie_port *port);
  126. int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
  127. };
  128. /**
  129. * struct mtk_pcie_port - PCIe port information
  130. * @base: IO mapped register base
  131. * @list: port list
  132. * @pcie: pointer to PCIe host info
  133. * @reset: pointer to port reset control
  134. * @sys_ck: pointer to transaction/data link layer clock
  135. * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
  136. * and RC initiated MMIO access
  137. * @axi_ck: pointer to application layer MMIO channel operating clock
  138. * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
  139. * when pcie_mac_ck/pcie_pipe_ck is turned off
  140. * @obff_ck: pointer to OBFF functional block operating clock
  141. * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
  142. * @phy: pointer to PHY control block
  143. * @lane: lane count
  144. * @slot: port slot
  145. * @irq_domain: legacy INTx IRQ domain
  146. * @inner_domain: inner IRQ domain
  147. * @msi_domain: MSI IRQ domain
  148. * @lock: protect the msi_irq_in_use bitmap
  149. * @msi_irq_in_use: bit map for assigned MSI IRQ
  150. */
  151. struct mtk_pcie_port {
  152. void __iomem *base;
  153. struct list_head list;
  154. struct mtk_pcie *pcie;
  155. struct reset_control *reset;
  156. struct clk *sys_ck;
  157. struct clk *ahb_ck;
  158. struct clk *axi_ck;
  159. struct clk *aux_ck;
  160. struct clk *obff_ck;
  161. struct clk *pipe_ck;
  162. struct phy *phy;
  163. u32 lane;
  164. u32 slot;
  165. struct irq_domain *irq_domain;
  166. struct irq_domain *inner_domain;
  167. struct irq_domain *msi_domain;
  168. struct mutex lock;
  169. DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
  170. };
  171. /**
  172. * struct mtk_pcie - PCIe host information
  173. * @dev: pointer to PCIe device
  174. * @base: IO mapped register base
  175. * @free_ck: free-run reference clock
  176. * @io: IO resource
  177. * @pio: PIO resource
  178. * @mem: non-prefetchable memory resource
  179. * @busn: bus range
  180. * @offset: IO / Memory offset
  181. * @ports: pointer to PCIe port information
  182. * @soc: pointer to SoC-dependent operations
  183. */
  184. struct mtk_pcie {
  185. struct device *dev;
  186. void __iomem *base;
  187. struct clk *free_ck;
  188. struct resource io;
  189. struct resource pio;
  190. struct resource mem;
  191. struct resource busn;
  192. struct {
  193. resource_size_t mem;
  194. resource_size_t io;
  195. } offset;
  196. struct list_head ports;
  197. const struct mtk_pcie_soc *soc;
  198. };
  199. static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
  200. {
  201. struct device *dev = pcie->dev;
  202. clk_disable_unprepare(pcie->free_ck);
  203. if (dev->pm_domain) {
  204. pm_runtime_put_sync(dev);
  205. pm_runtime_disable(dev);
  206. }
  207. }
  208. static void mtk_pcie_port_free(struct mtk_pcie_port *port)
  209. {
  210. struct mtk_pcie *pcie = port->pcie;
  211. struct device *dev = pcie->dev;
  212. devm_iounmap(dev, port->base);
  213. list_del(&port->list);
  214. devm_kfree(dev, port);
  215. }
  216. static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
  217. {
  218. struct mtk_pcie_port *port, *tmp;
  219. list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
  220. phy_power_off(port->phy);
  221. phy_exit(port->phy);
  222. clk_disable_unprepare(port->pipe_ck);
  223. clk_disable_unprepare(port->obff_ck);
  224. clk_disable_unprepare(port->axi_ck);
  225. clk_disable_unprepare(port->aux_ck);
  226. clk_disable_unprepare(port->ahb_ck);
  227. clk_disable_unprepare(port->sys_ck);
  228. mtk_pcie_port_free(port);
  229. }
  230. mtk_pcie_subsys_powerdown(pcie);
  231. }
  232. static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
  233. {
  234. u32 val;
  235. int err;
  236. err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
  237. !(val & APP_CFG_REQ), 10,
  238. 100 * USEC_PER_MSEC);
  239. if (err)
  240. return PCIBIOS_SET_FAILED;
  241. if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
  242. return PCIBIOS_SET_FAILED;
  243. return PCIBIOS_SUCCESSFUL;
  244. }
  245. static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
  246. int where, int size, u32 *val)
  247. {
  248. u32 tmp;
  249. /* Write PCIe configuration transaction header for Cfgrd */
  250. writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
  251. port->base + PCIE_CFG_HEADER0);
  252. writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
  253. writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
  254. port->base + PCIE_CFG_HEADER2);
  255. /* Trigger h/w to transmit Cfgrd TLP */
  256. tmp = readl(port->base + PCIE_APP_TLP_REQ);
  257. tmp |= APP_CFG_REQ;
  258. writel(tmp, port->base + PCIE_APP_TLP_REQ);
  259. /* Check completion status */
  260. if (mtk_pcie_check_cfg_cpld(port))
  261. return PCIBIOS_SET_FAILED;
  262. /* Read cpld payload of Cfgrd */
  263. *val = readl(port->base + PCIE_CFG_RDATA);
  264. if (size == 1)
  265. *val = (*val >> (8 * (where & 3))) & 0xff;
  266. else if (size == 2)
  267. *val = (*val >> (8 * (where & 3))) & 0xffff;
  268. return PCIBIOS_SUCCESSFUL;
  269. }
  270. static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
  271. int where, int size, u32 val)
  272. {
  273. /* Write PCIe configuration transaction header for Cfgwr */
  274. writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
  275. port->base + PCIE_CFG_HEADER0);
  276. writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
  277. writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
  278. port->base + PCIE_CFG_HEADER2);
  279. /* Write Cfgwr data */
  280. val = val << 8 * (where & 3);
  281. writel(val, port->base + PCIE_CFG_WDATA);
  282. /* Trigger h/w to transmit Cfgwr TLP */
  283. val = readl(port->base + PCIE_APP_TLP_REQ);
  284. val |= APP_CFG_REQ;
  285. writel(val, port->base + PCIE_APP_TLP_REQ);
  286. /* Check completion status */
  287. return mtk_pcie_check_cfg_cpld(port);
  288. }
  289. static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
  290. unsigned int devfn)
  291. {
  292. struct mtk_pcie *pcie = bus->sysdata;
  293. struct mtk_pcie_port *port;
  294. list_for_each_entry(port, &pcie->ports, list)
  295. if (port->slot == PCI_SLOT(devfn))
  296. return port;
  297. return NULL;
  298. }
  299. static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
  300. int where, int size, u32 *val)
  301. {
  302. struct mtk_pcie_port *port;
  303. u32 bn = bus->number;
  304. int ret;
  305. port = mtk_pcie_find_port(bus, devfn);
  306. if (!port) {
  307. *val = ~0;
  308. return PCIBIOS_DEVICE_NOT_FOUND;
  309. }
  310. ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
  311. if (ret)
  312. *val = ~0;
  313. return ret;
  314. }
  315. static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
  316. int where, int size, u32 val)
  317. {
  318. struct mtk_pcie_port *port;
  319. u32 bn = bus->number;
  320. port = mtk_pcie_find_port(bus, devfn);
  321. if (!port)
  322. return PCIBIOS_DEVICE_NOT_FOUND;
  323. return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
  324. }
  325. static struct pci_ops mtk_pcie_ops_v2 = {
  326. .read = mtk_pcie_config_read,
  327. .write = mtk_pcie_config_write,
  328. };
  329. static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
  330. {
  331. struct mtk_pcie *pcie = port->pcie;
  332. struct resource *mem = &pcie->mem;
  333. const struct mtk_pcie_soc *soc = port->pcie->soc;
  334. u32 val;
  335. size_t size;
  336. int err;
  337. /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
  338. if (pcie->base) {
  339. val = readl(pcie->base + PCIE_SYS_CFG_V2);
  340. val |= PCIE_CSR_LTSSM_EN(port->slot) |
  341. PCIE_CSR_ASPM_L1_EN(port->slot);
  342. writel(val, pcie->base + PCIE_SYS_CFG_V2);
  343. }
  344. /* Assert all reset signals */
  345. writel(0, port->base + PCIE_RST_CTRL);
  346. /*
  347. * Enable PCIe link down reset, if link status changed from link up to
  348. * link down, this will reset MAC control registers and configuration
  349. * space.
  350. */
  351. writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
  352. /* De-assert PHY, PE, PIPE, MAC and configuration reset */
  353. val = readl(port->base + PCIE_RST_CTRL);
  354. val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
  355. PCIE_MAC_SRSTB | PCIE_CRSTB;
  356. writel(val, port->base + PCIE_RST_CTRL);
  357. /* Set up vendor ID and class code */
  358. if (soc->need_fix_class_id) {
  359. val = PCI_VENDOR_ID_MEDIATEK;
  360. writew(val, port->base + PCIE_CONF_VEND_ID);
  361. val = PCI_CLASS_BRIDGE_HOST;
  362. writew(val, port->base + PCIE_CONF_CLASS_ID);
  363. }
  364. /* 100ms timeout value should be enough for Gen1/2 training */
  365. err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
  366. !!(val & PCIE_PORT_LINKUP_V2), 20,
  367. 100 * USEC_PER_MSEC);
  368. if (err)
  369. return -ETIMEDOUT;
  370. /* Set INTx mask */
  371. val = readl(port->base + PCIE_INT_MASK);
  372. val &= ~INTX_MASK;
  373. writel(val, port->base + PCIE_INT_MASK);
  374. /* Set AHB to PCIe translation windows */
  375. size = mem->end - mem->start;
  376. val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
  377. writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
  378. val = upper_32_bits(mem->start);
  379. writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
  380. /* Set PCIe to AXI translation memory space.*/
  381. val = fls(0xffffffff) | WIN_ENABLE;
  382. writel(val, port->base + PCIE_AXI_WINDOW0);
  383. return 0;
  384. }
  385. static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  386. {
  387. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
  388. phys_addr_t addr;
  389. /* MT2712/MT7622 only support 32-bit MSI addresses */
  390. addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
  391. msg->address_hi = 0;
  392. msg->address_lo = lower_32_bits(addr);
  393. msg->data = data->hwirq;
  394. dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
  395. (int)data->hwirq, msg->address_hi, msg->address_lo);
  396. }
  397. static int mtk_msi_set_affinity(struct irq_data *irq_data,
  398. const struct cpumask *mask, bool force)
  399. {
  400. return -EINVAL;
  401. }
  402. static void mtk_msi_ack_irq(struct irq_data *data)
  403. {
  404. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
  405. u32 hwirq = data->hwirq;
  406. writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
  407. }
  408. static struct irq_chip mtk_msi_bottom_irq_chip = {
  409. .name = "MTK MSI",
  410. .irq_compose_msi_msg = mtk_compose_msi_msg,
  411. .irq_set_affinity = mtk_msi_set_affinity,
  412. .irq_ack = mtk_msi_ack_irq,
  413. };
  414. static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  415. unsigned int nr_irqs, void *args)
  416. {
  417. struct mtk_pcie_port *port = domain->host_data;
  418. unsigned long bit;
  419. WARN_ON(nr_irqs != 1);
  420. mutex_lock(&port->lock);
  421. bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
  422. if (bit >= MTK_MSI_IRQS_NUM) {
  423. mutex_unlock(&port->lock);
  424. return -ENOSPC;
  425. }
  426. __set_bit(bit, port->msi_irq_in_use);
  427. mutex_unlock(&port->lock);
  428. irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
  429. domain->host_data, handle_edge_irq,
  430. NULL, NULL);
  431. return 0;
  432. }
  433. static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
  434. unsigned int virq, unsigned int nr_irqs)
  435. {
  436. struct irq_data *d = irq_domain_get_irq_data(domain, virq);
  437. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
  438. mutex_lock(&port->lock);
  439. if (!test_bit(d->hwirq, port->msi_irq_in_use))
  440. dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
  441. d->hwirq);
  442. else
  443. __clear_bit(d->hwirq, port->msi_irq_in_use);
  444. mutex_unlock(&port->lock);
  445. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  446. }
  447. static const struct irq_domain_ops msi_domain_ops = {
  448. .alloc = mtk_pcie_irq_domain_alloc,
  449. .free = mtk_pcie_irq_domain_free,
  450. };
  451. static struct irq_chip mtk_msi_irq_chip = {
  452. .name = "MTK PCIe MSI",
  453. .irq_ack = irq_chip_ack_parent,
  454. .irq_mask = pci_msi_mask_irq,
  455. .irq_unmask = pci_msi_unmask_irq,
  456. };
  457. static struct msi_domain_info mtk_msi_domain_info = {
  458. .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
  459. MSI_FLAG_PCI_MSIX),
  460. .chip = &mtk_msi_irq_chip,
  461. };
  462. static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
  463. {
  464. struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
  465. mutex_init(&port->lock);
  466. port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
  467. &msi_domain_ops, port);
  468. if (!port->inner_domain) {
  469. dev_err(port->pcie->dev, "failed to create IRQ domain\n");
  470. return -ENOMEM;
  471. }
  472. port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
  473. port->inner_domain);
  474. if (!port->msi_domain) {
  475. dev_err(port->pcie->dev, "failed to create MSI domain\n");
  476. irq_domain_remove(port->inner_domain);
  477. return -ENOMEM;
  478. }
  479. return 0;
  480. }
  481. static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
  482. {
  483. u32 val;
  484. phys_addr_t msg_addr;
  485. msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
  486. val = lower_32_bits(msg_addr);
  487. writel(val, port->base + PCIE_IMSI_ADDR);
  488. val = readl(port->base + PCIE_INT_MASK);
  489. val &= ~MSI_MASK;
  490. writel(val, port->base + PCIE_INT_MASK);
  491. }
  492. static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
  493. irq_hw_number_t hwirq)
  494. {
  495. irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
  496. irq_set_chip_data(irq, domain->host_data);
  497. return 0;
  498. }
  499. static const struct irq_domain_ops intx_domain_ops = {
  500. .map = mtk_pcie_intx_map,
  501. };
  502. static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
  503. struct device_node *node)
  504. {
  505. struct device *dev = port->pcie->dev;
  506. struct device_node *pcie_intc_node;
  507. int ret;
  508. /* Setup INTx */
  509. pcie_intc_node = of_get_next_child(node, NULL);
  510. if (!pcie_intc_node) {
  511. dev_err(dev, "no PCIe Intc node found\n");
  512. return -ENODEV;
  513. }
  514. port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
  515. &intx_domain_ops, port);
  516. if (!port->irq_domain) {
  517. dev_err(dev, "failed to get INTx IRQ domain\n");
  518. return -ENODEV;
  519. }
  520. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  521. ret = mtk_pcie_allocate_msi_domains(port);
  522. if (ret)
  523. return ret;
  524. mtk_pcie_enable_msi(port);
  525. }
  526. return 0;
  527. }
  528. static void mtk_pcie_intr_handler(struct irq_desc *desc)
  529. {
  530. struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
  531. struct irq_chip *irqchip = irq_desc_get_chip(desc);
  532. unsigned long status;
  533. u32 virq;
  534. u32 bit = INTX_SHIFT;
  535. chained_irq_enter(irqchip, desc);
  536. status = readl(port->base + PCIE_INT_STATUS);
  537. if (status & INTX_MASK) {
  538. for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
  539. /* Clear the INTx */
  540. writel(1 << bit, port->base + PCIE_INT_STATUS);
  541. virq = irq_find_mapping(port->irq_domain,
  542. bit - INTX_SHIFT);
  543. generic_handle_irq(virq);
  544. }
  545. }
  546. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  547. if (status & MSI_STATUS){
  548. unsigned long imsi_status;
  549. while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
  550. for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
  551. virq = irq_find_mapping(port->inner_domain, bit);
  552. generic_handle_irq(virq);
  553. }
  554. }
  555. /* Clear MSI interrupt status */
  556. writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
  557. }
  558. }
  559. chained_irq_exit(irqchip, desc);
  560. return;
  561. }
  562. static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
  563. struct device_node *node)
  564. {
  565. struct mtk_pcie *pcie = port->pcie;
  566. struct device *dev = pcie->dev;
  567. struct platform_device *pdev = to_platform_device(dev);
  568. int err, irq;
  569. err = mtk_pcie_init_irq_domain(port, node);
  570. if (err) {
  571. dev_err(dev, "failed to init PCIe IRQ domain\n");
  572. return err;
  573. }
  574. irq = platform_get_irq(pdev, port->slot);
  575. irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
  576. return 0;
  577. }
  578. static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
  579. unsigned int devfn, int where)
  580. {
  581. struct mtk_pcie *pcie = bus->sysdata;
  582. writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
  583. bus->number), pcie->base + PCIE_CFG_ADDR);
  584. return pcie->base + PCIE_CFG_DATA + (where & 3);
  585. }
  586. static struct pci_ops mtk_pcie_ops = {
  587. .map_bus = mtk_pcie_map_bus,
  588. .read = pci_generic_config_read,
  589. .write = pci_generic_config_write,
  590. };
  591. static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
  592. {
  593. struct mtk_pcie *pcie = port->pcie;
  594. u32 func = PCI_FUNC(port->slot << 3);
  595. u32 slot = PCI_SLOT(port->slot << 3);
  596. u32 val;
  597. int err;
  598. /* assert port PERST_N */
  599. val = readl(pcie->base + PCIE_SYS_CFG);
  600. val |= PCIE_PORT_PERST(port->slot);
  601. writel(val, pcie->base + PCIE_SYS_CFG);
  602. /* de-assert port PERST_N */
  603. val = readl(pcie->base + PCIE_SYS_CFG);
  604. val &= ~PCIE_PORT_PERST(port->slot);
  605. writel(val, pcie->base + PCIE_SYS_CFG);
  606. /* 100ms timeout value should be enough for Gen1/2 training */
  607. err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
  608. !!(val & PCIE_PORT_LINKUP), 20,
  609. 100 * USEC_PER_MSEC);
  610. if (err)
  611. return -ETIMEDOUT;
  612. /* enable interrupt */
  613. val = readl(pcie->base + PCIE_INT_ENABLE);
  614. val |= PCIE_PORT_INT_EN(port->slot);
  615. writel(val, pcie->base + PCIE_INT_ENABLE);
  616. /* map to all DDR region. We need to set it before cfg operation. */
  617. writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
  618. port->base + PCIE_BAR0_SETUP);
  619. /* configure class code and revision ID */
  620. writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
  621. /* configure FC credit */
  622. writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
  623. pcie->base + PCIE_CFG_ADDR);
  624. val = readl(pcie->base + PCIE_CFG_DATA);
  625. val &= ~PCIE_FC_CREDIT_MASK;
  626. val |= PCIE_FC_CREDIT_VAL(0x806c);
  627. writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
  628. pcie->base + PCIE_CFG_ADDR);
  629. writel(val, pcie->base + PCIE_CFG_DATA);
  630. /* configure RC FTS number to 250 when it leaves L0s */
  631. writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
  632. pcie->base + PCIE_CFG_ADDR);
  633. val = readl(pcie->base + PCIE_CFG_DATA);
  634. val &= ~PCIE_FTS_NUM_MASK;
  635. val |= PCIE_FTS_NUM_L0(0x50);
  636. writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
  637. pcie->base + PCIE_CFG_ADDR);
  638. writel(val, pcie->base + PCIE_CFG_DATA);
  639. return 0;
  640. }
  641. static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
  642. {
  643. struct mtk_pcie *pcie = port->pcie;
  644. struct device *dev = pcie->dev;
  645. int err;
  646. err = clk_prepare_enable(port->sys_ck);
  647. if (err) {
  648. dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
  649. goto err_sys_clk;
  650. }
  651. err = clk_prepare_enable(port->ahb_ck);
  652. if (err) {
  653. dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
  654. goto err_ahb_clk;
  655. }
  656. err = clk_prepare_enable(port->aux_ck);
  657. if (err) {
  658. dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
  659. goto err_aux_clk;
  660. }
  661. err = clk_prepare_enable(port->axi_ck);
  662. if (err) {
  663. dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
  664. goto err_axi_clk;
  665. }
  666. err = clk_prepare_enable(port->obff_ck);
  667. if (err) {
  668. dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
  669. goto err_obff_clk;
  670. }
  671. err = clk_prepare_enable(port->pipe_ck);
  672. if (err) {
  673. dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
  674. goto err_pipe_clk;
  675. }
  676. reset_control_assert(port->reset);
  677. reset_control_deassert(port->reset);
  678. err = phy_init(port->phy);
  679. if (err) {
  680. dev_err(dev, "failed to initialize port%d phy\n", port->slot);
  681. goto err_phy_init;
  682. }
  683. err = phy_power_on(port->phy);
  684. if (err) {
  685. dev_err(dev, "failed to power on port%d phy\n", port->slot);
  686. goto err_phy_on;
  687. }
  688. if (!pcie->soc->startup(port))
  689. return;
  690. dev_info(dev, "Port%d link down\n", port->slot);
  691. phy_power_off(port->phy);
  692. err_phy_on:
  693. phy_exit(port->phy);
  694. err_phy_init:
  695. clk_disable_unprepare(port->pipe_ck);
  696. err_pipe_clk:
  697. clk_disable_unprepare(port->obff_ck);
  698. err_obff_clk:
  699. clk_disable_unprepare(port->axi_ck);
  700. err_axi_clk:
  701. clk_disable_unprepare(port->aux_ck);
  702. err_aux_clk:
  703. clk_disable_unprepare(port->ahb_ck);
  704. err_ahb_clk:
  705. clk_disable_unprepare(port->sys_ck);
  706. err_sys_clk:
  707. mtk_pcie_port_free(port);
  708. }
  709. static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
  710. struct device_node *node,
  711. int slot)
  712. {
  713. struct mtk_pcie_port *port;
  714. struct resource *regs;
  715. struct device *dev = pcie->dev;
  716. struct platform_device *pdev = to_platform_device(dev);
  717. char name[10];
  718. int err;
  719. port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
  720. if (!port)
  721. return -ENOMEM;
  722. err = of_property_read_u32(node, "num-lanes", &port->lane);
  723. if (err) {
  724. dev_err(dev, "missing num-lanes property\n");
  725. return err;
  726. }
  727. snprintf(name, sizeof(name), "port%d", slot);
  728. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  729. port->base = devm_ioremap_resource(dev, regs);
  730. if (IS_ERR(port->base)) {
  731. dev_err(dev, "failed to map port%d base\n", slot);
  732. return PTR_ERR(port->base);
  733. }
  734. snprintf(name, sizeof(name), "sys_ck%d", slot);
  735. port->sys_ck = devm_clk_get(dev, name);
  736. if (IS_ERR(port->sys_ck)) {
  737. dev_err(dev, "failed to get sys_ck%d clock\n", slot);
  738. return PTR_ERR(port->sys_ck);
  739. }
  740. /* sys_ck might be divided into the following parts in some chips */
  741. snprintf(name, sizeof(name), "ahb_ck%d", slot);
  742. port->ahb_ck = devm_clk_get(dev, name);
  743. if (IS_ERR(port->ahb_ck)) {
  744. if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
  745. return -EPROBE_DEFER;
  746. port->ahb_ck = NULL;
  747. }
  748. snprintf(name, sizeof(name), "axi_ck%d", slot);
  749. port->axi_ck = devm_clk_get(dev, name);
  750. if (IS_ERR(port->axi_ck)) {
  751. if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
  752. return -EPROBE_DEFER;
  753. port->axi_ck = NULL;
  754. }
  755. snprintf(name, sizeof(name), "aux_ck%d", slot);
  756. port->aux_ck = devm_clk_get(dev, name);
  757. if (IS_ERR(port->aux_ck)) {
  758. if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
  759. return -EPROBE_DEFER;
  760. port->aux_ck = NULL;
  761. }
  762. snprintf(name, sizeof(name), "obff_ck%d", slot);
  763. port->obff_ck = devm_clk_get(dev, name);
  764. if (IS_ERR(port->obff_ck)) {
  765. if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
  766. return -EPROBE_DEFER;
  767. port->obff_ck = NULL;
  768. }
  769. snprintf(name, sizeof(name), "pipe_ck%d", slot);
  770. port->pipe_ck = devm_clk_get(dev, name);
  771. if (IS_ERR(port->pipe_ck)) {
  772. if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
  773. return -EPROBE_DEFER;
  774. port->pipe_ck = NULL;
  775. }
  776. snprintf(name, sizeof(name), "pcie-rst%d", slot);
  777. port->reset = devm_reset_control_get_optional_exclusive(dev, name);
  778. if (PTR_ERR(port->reset) == -EPROBE_DEFER)
  779. return PTR_ERR(port->reset);
  780. /* some platforms may use default PHY setting */
  781. snprintf(name, sizeof(name), "pcie-phy%d", slot);
  782. port->phy = devm_phy_optional_get(dev, name);
  783. if (IS_ERR(port->phy))
  784. return PTR_ERR(port->phy);
  785. port->slot = slot;
  786. port->pcie = pcie;
  787. if (pcie->soc->setup_irq) {
  788. err = pcie->soc->setup_irq(port, node);
  789. if (err)
  790. return err;
  791. }
  792. INIT_LIST_HEAD(&port->list);
  793. list_add_tail(&port->list, &pcie->ports);
  794. return 0;
  795. }
/*
 * mtk_pcie_subsys_powerup - bring up the shared (per-controller) resources
 * @pcie: host controller state
 *
 * Maps the optional "subsys" shared register window, acquires the optional
 * top-level free_ck clock, enables the power domain via runtime PM when one
 * is attached, and finally enables free_ck.
 *
 * Returns 0 on success or a negative errno.  On failure the runtime PM
 * reference taken here is dropped again.
 */
static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base)) {
			dev_err(dev, "failed to map shared register\n");
			return PTR_ERR(pcie->base);
		}
	}

	/* free_ck is optional: NULL makes clk_prepare_enable() a no-op */
	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		pcie->free_ck = NULL;
	}

	/* power the MTCMOS domain on before touching any clock */
	if (dev->pm_domain) {
		pm_runtime_enable(dev);
		pm_runtime_get_sync(dev);
	}

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	/* undo the runtime PM get/enable done above */
	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}

	return err;
}
  835. static int mtk_pcie_setup(struct mtk_pcie *pcie)
  836. {
  837. struct device *dev = pcie->dev;
  838. struct device_node *node = dev->of_node, *child;
  839. struct of_pci_range_parser parser;
  840. struct of_pci_range range;
  841. struct resource res;
  842. struct mtk_pcie_port *port, *tmp;
  843. int err;
  844. if (of_pci_range_parser_init(&parser, node)) {
  845. dev_err(dev, "missing \"ranges\" property\n");
  846. return -EINVAL;
  847. }
  848. for_each_of_pci_range(&parser, &range) {
  849. err = of_pci_range_to_resource(&range, node, &res);
  850. if (err < 0)
  851. return err;
  852. switch (res.flags & IORESOURCE_TYPE_BITS) {
  853. case IORESOURCE_IO:
  854. pcie->offset.io = res.start - range.pci_addr;
  855. memcpy(&pcie->pio, &res, sizeof(res));
  856. pcie->pio.name = node->full_name;
  857. pcie->io.start = range.cpu_addr;
  858. pcie->io.end = range.cpu_addr + range.size - 1;
  859. pcie->io.flags = IORESOURCE_MEM;
  860. pcie->io.name = "I/O";
  861. memcpy(&res, &pcie->io, sizeof(res));
  862. break;
  863. case IORESOURCE_MEM:
  864. pcie->offset.mem = res.start - range.pci_addr;
  865. memcpy(&pcie->mem, &res, sizeof(res));
  866. pcie->mem.name = "non-prefetchable";
  867. break;
  868. }
  869. }
  870. err = of_pci_parse_bus_range(node, &pcie->busn);
  871. if (err < 0) {
  872. dev_err(dev, "failed to parse bus ranges property: %d\n", err);
  873. pcie->busn.name = node->name;
  874. pcie->busn.start = 0;
  875. pcie->busn.end = 0xff;
  876. pcie->busn.flags = IORESOURCE_BUS;
  877. }
  878. for_each_available_child_of_node(node, child) {
  879. int slot;
  880. err = of_pci_get_devfn(child);
  881. if (err < 0) {
  882. dev_err(dev, "failed to parse devfn: %d\n", err);
  883. return err;
  884. }
  885. slot = PCI_SLOT(err);
  886. err = mtk_pcie_parse_port(pcie, child, slot);
  887. if (err)
  888. return err;
  889. }
  890. err = mtk_pcie_subsys_powerup(pcie);
  891. if (err)
  892. return err;
  893. /* enable each port, and then check link status */
  894. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  895. mtk_pcie_enable_port(port);
  896. /* power down PCIe subsys if slots are all empty (link down) */
  897. if (list_empty(&pcie->ports))
  898. mtk_pcie_subsys_powerdown(pcie);
  899. return 0;
  900. }
  901. static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
  902. {
  903. struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
  904. struct list_head *windows = &host->windows;
  905. struct device *dev = pcie->dev;
  906. int err;
  907. pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
  908. pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
  909. pci_add_resource(windows, &pcie->busn);
  910. err = devm_request_pci_bus_resources(dev, windows);
  911. if (err < 0)
  912. return err;
  913. devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
  914. return 0;
  915. }
  916. static int mtk_pcie_register_host(struct pci_host_bridge *host)
  917. {
  918. struct mtk_pcie *pcie = pci_host_bridge_priv(host);
  919. struct pci_bus *child;
  920. int err;
  921. host->busnr = pcie->busn.start;
  922. host->dev.parent = pcie->dev;
  923. host->ops = pcie->soc->ops;
  924. host->map_irq = of_irq_parse_and_map_pci;
  925. host->swizzle_irq = pci_common_swizzle;
  926. host->sysdata = pcie;
  927. err = pci_scan_root_bus_bridge(host);
  928. if (err < 0)
  929. return err;
  930. pci_bus_size_bridges(host->bus);
  931. pci_bus_assign_resources(host->bus);
  932. list_for_each_entry(child, &host->bus->children, node)
  933. pcie_bus_configure_settings(child);
  934. pci_bus_add_devices(host->bus);
  935. return 0;
  936. }
  937. static int mtk_pcie_probe(struct platform_device *pdev)
  938. {
  939. struct device *dev = &pdev->dev;
  940. struct mtk_pcie *pcie;
  941. struct pci_host_bridge *host;
  942. int err;
  943. host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
  944. if (!host)
  945. return -ENOMEM;
  946. pcie = pci_host_bridge_priv(host);
  947. pcie->dev = dev;
  948. pcie->soc = of_device_get_match_data(dev);
  949. platform_set_drvdata(pdev, pcie);
  950. INIT_LIST_HEAD(&pcie->ports);
  951. err = mtk_pcie_setup(pcie);
  952. if (err)
  953. return err;
  954. err = mtk_pcie_request_resources(pcie);
  955. if (err)
  956. goto put_resources;
  957. err = mtk_pcie_register_host(host);
  958. if (err)
  959. goto put_resources;
  960. return 0;
  961. put_resources:
  962. if (!list_empty(&pcie->ports))
  963. mtk_pcie_put_resources(pcie);
  964. return err;
  965. }
/* Gen1 IP (MT2701/MT7623): v1 config accessors, no per-port IRQ setup */
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

/* Gen2 IP on MT2712: v2 accessors plus per-port MSI/INTx IRQ setup */
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/*
 * Gen2 IP on MT7622: same as MT2712 but the hardware reports a wrong
 * class ID that the startup code has to patch up.
 */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};
/* DT match table: each compatible selects its SoC config as match data */
static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{},
};
/*
 * Built-in only driver (no remove callback), so unbinding via sysfs is
 * suppressed to avoid leaving the hardware half torn down.
 */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(mtk_pcie_driver);