pcie-mediatek.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176
  1. /*
  2. * MediaTek PCIe host controller driver.
  3. *
  4. * Copyright (c) 2017 MediaTek Inc.
  5. * Author: Ryder Lee <ryder.lee@mediatek.com>
  6. * Honghui Zhang <honghui.zhang@mediatek.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/clk.h>
  18. #include <linux/delay.h>
  19. #include <linux/iopoll.h>
  20. #include <linux/irq.h>
  21. #include <linux/irqdomain.h>
  22. #include <linux/kernel.h>
  23. #include <linux/of_address.h>
  24. #include <linux/of_pci.h>
  25. #include <linux/of_platform.h>
  26. #include <linux/pci.h>
  27. #include <linux/phy/phy.h>
  28. #include <linux/platform_device.h>
  29. #include <linux/pm_runtime.h>
  30. #include <linux/reset.h>
/* PCIe shared registers (subsys/top-level block) */
#define PCIE_SYS_CFG 0x00
#define PCIE_INT_ENABLE 0x0c
/* Indirect configuration-space access window (v1 hosts) */
#define PCIE_CFG_ADDR 0x20
#define PCIE_CFG_DATA 0x24
/* PCIe per port registers */
#define PCIE_BAR0_SETUP 0x10
#define PCIE_CLASS 0x34
#define PCIE_LINK_STATUS 0x50
#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
#define PCIE_PORT_PERST(x) BIT(1 + (x))
#define PCIE_PORT_LINKUP BIT(0)
#define PCIE_BAR_MAP_MAX GENMASK(31, 16)
#define PCIE_BAR_ENABLE BIT(0)
#define PCIE_REVISION_ID BIT(0)
/* PCI-to-PCI bridge class code, pre-shifted into the upper 24 bits */
#define PCIE_CLASS_CODE (0x60400 << 8)
/* Encode (register, function, device, bus) into the PCIE_CFG_ADDR window */
#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
/*
 * NOTE(review): `<<` binds tighter than `&`, so this expands to
 * ((x) & 0xff00); with the x = 0x50 value used below the result is 0.
 * The intent was likely (((x) & 0xff) << 8) — confirm against the
 * MediaTek register manual before changing behavior.
 */
#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
#define PCIE_FC_CREDIT 0x73c
#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
/* PCIe V2 share registers */
#define PCIE_SYS_CFG_V2 0x0
#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR 0x0c0
#define PCIE_INT_MASK 0x420
#define INTX_MASK GENMASK(19, 16)
#define INTX_SHIFT 16
#define PCIE_INT_STATUS 0x424
#define MSI_STATUS BIT(23)
#define PCIE_IMSI_STATUS 0x42c
#define PCIE_IMSI_ADDR 0x430
#define MSI_MASK BIT(23)
#define MTK_MSI_IRQS_NUM 32
/* AHB-to-PCIe outbound translation window (base + size) */
#define PCIE_AHB_TRANS_BASE0_L 0x438
#define PCIE_AHB_TRANS_BASE0_H 0x43c
#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0 0x448
#define WIN_ENABLE BIT(7)
/* PCIe V2 configuration transaction header (hand-built Cfgrd/Cfgwr TLPs) */
#define PCIE_CFG_HEADER0 0x460
#define PCIE_CFG_HEADER1 0x464
#define PCIE_CFG_HEADER2 0x468
#define PCIE_CFG_WDATA 0x470
#define PCIE_APP_TLP_REQ 0x488
#define PCIE_CFG_RDATA 0x48c
#define APP_CFG_REQ BIT(0)
#define APP_CPL_STATUS GENMASK(7, 5)
#define CFG_WRRD_TYPE_0 4
#define CFG_WR_FMT 2
#define CFG_RD_FMT 0
#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
/* DW1 carries the first-DW byte-enables for a `size`-byte access at `where` */
#define CFG_HEADER_DW1(where, size) \
(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
/* PCIe V2 reset control */
#define PCIE_RST_CTRL 0x510
#define PCIE_PHY_RSTB BIT(0)
#define PCIE_PIPE_SRSTB BIT(1)
#define PCIE_MAC_SRSTB BIT(2)
#define PCIE_CRSTB BIT(3)
#define PCIE_PERSTB BIT(8)
#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2 0x804
#define PCIE_PORT_LINKUP_V2 BIT(10)
struct mtk_pcie_port;

/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @has_msi: whether this host supports MSI interrupts or not
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool has_msi;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};
/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	/* ahb/axi/aux/obff/pipe clocks are optional; NULL when absent */
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	struct irq_domain *irq_domain;
	struct irq_domain *msi_domain;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base (shared "subsys" registers; may be NULL)
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};
  198. static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
  199. {
  200. struct device *dev = pcie->dev;
  201. clk_disable_unprepare(pcie->free_ck);
  202. if (dev->pm_domain) {
  203. pm_runtime_put_sync(dev);
  204. pm_runtime_disable(dev);
  205. }
  206. }
  207. static void mtk_pcie_port_free(struct mtk_pcie_port *port)
  208. {
  209. struct mtk_pcie *pcie = port->pcie;
  210. struct device *dev = pcie->dev;
  211. devm_iounmap(dev, port->base);
  212. list_del(&port->list);
  213. devm_kfree(dev, port);
  214. }
  215. static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
  216. {
  217. struct mtk_pcie_port *port, *tmp;
  218. list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
  219. phy_power_off(port->phy);
  220. phy_exit(port->phy);
  221. clk_disable_unprepare(port->pipe_ck);
  222. clk_disable_unprepare(port->obff_ck);
  223. clk_disable_unprepare(port->axi_ck);
  224. clk_disable_unprepare(port->aux_ck);
  225. clk_disable_unprepare(port->ahb_ck);
  226. clk_disable_unprepare(port->sys_ck);
  227. mtk_pcie_port_free(port);
  228. }
  229. mtk_pcie_subsys_powerdown(pcie);
  230. }
/*
 * Wait for an in-flight configuration TLP to complete.
 *
 * Polls PCIE_APP_TLP_REQ (up to 100ms) until the hardware clears
 * APP_CFG_REQ, then checks the completion-status field.  Returns
 * PCIBIOS_SUCCESSFUL on a clean completion, PCIBIOS_SET_FAILED on
 * timeout or a non-zero completion status.
 */
static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
{
	u32 val;
	int err;

	/* Hardware clears APP_CFG_REQ once the request has been consumed. */
	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
					!(val & APP_CFG_REQ), 10,
					100 * USEC_PER_MSEC);
	if (err)
		return PCIBIOS_SET_FAILED;

	/* Any non-zero completion status means the access failed. */
	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Issue a configuration read by hand-building a Cfgrd TLP (v2 hosts).
 *
 * The three header DWs, the request trigger and the read-data register
 * form a strict sequence; statement order below matches the hardware
 * protocol and must not be rearranged.
 */
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	/* DW2 takes the register offset; CFG_DW2_REGN() masks it to bits 11:2 */
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd and extract the requested bytes */
	*val = readl(port->base + PCIE_CFG_RDATA);
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Issue a configuration write by hand-building a Cfgwr TLP (v2 hosts).
 * Mirrors mtk_pcie_hw_rd_cfg(); the byte-enables in header DW1 select
 * which lanes of the shifted 32-bit payload are actually written.
 */
static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data, shifted to the byte lane selected by `where` */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP (val is reused as a scratch reg) */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}
  288. static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
  289. unsigned int devfn)
  290. {
  291. struct mtk_pcie *pcie = bus->sysdata;
  292. struct mtk_pcie_port *port;
  293. list_for_each_entry(port, &pcie->ports, list)
  294. if (port->slot == PCI_SLOT(devfn))
  295. return port;
  296. return NULL;
  297. }
  298. static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
  299. int where, int size, u32 *val)
  300. {
  301. struct mtk_pcie_port *port;
  302. u32 bn = bus->number;
  303. int ret;
  304. port = mtk_pcie_find_port(bus, devfn);
  305. if (!port) {
  306. *val = ~0;
  307. return PCIBIOS_DEVICE_NOT_FOUND;
  308. }
  309. ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
  310. if (ret)
  311. *val = ~0;
  312. return ret;
  313. }
  314. static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
  315. int where, int size, u32 val)
  316. {
  317. struct mtk_pcie_port *port;
  318. u32 bn = bus->number;
  319. port = mtk_pcie_find_port(bus, devfn);
  320. if (!port)
  321. return PCIBIOS_DEVICE_NOT_FOUND;
  322. return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
  323. }
/* Config accessors for v2 hosts (TLP-based indirect access). */
static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
/*
 * Bring up one v2 port: release resets, wait for link training, unmask
 * INTx and program the AHB<->PCIe translation windows.
 * Returns 0 on link up, -ETIMEDOUT if training does not finish in 100ms.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct resource *mem = &pcie->mem;
	u32 val;
	size_t size;
	int err;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask (clear INTA-INTD mask bits to unmask them) */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	/*
	 * Set AHB to PCIe translation windows.
	 * NOTE(review): this is end - start, one less than resource_size();
	 * for a power-of-two-sized window fls() then yields one less than
	 * the full size encoding — confirm intended against the datasheet.
	 */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space (full 4GB window). */
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
  376. static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
  377. {
  378. int msi;
  379. msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
  380. if (msi < MTK_MSI_IRQS_NUM)
  381. set_bit(msi, port->msi_irq_in_use);
  382. else
  383. return -ENOSPC;
  384. return msi;
  385. }
/* Return an MSI hwirq to the port's free pool. */
static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
{
	clear_bit(hwirq, port->msi_irq_in_use);
}
/*
 * msi_controller.setup_irq: allocate a hwirq, map it into the port's MSI
 * domain and program the device's MSI message to target PCIE_MSI_VECTOR.
 * Returns 0 on success or a negative errno.
 */
static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
				  struct pci_dev *pdev, struct msi_desc *desc)
{
	struct mtk_pcie_port *port;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	phys_addr_t msg_addr;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return -EINVAL;

	hwirq = mtk_pcie_msi_alloc(port);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(port->msi_domain, hwirq);
	if (!irq) {
		/* Release the hwirq again on mapping failure. */
		mtk_pcie_msi_free(port, hwirq);
		return -EINVAL;
	}

	chip->dev = &pdev->dev;

	irq_set_msi_desc(irq, desc);

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg.address_hi = 0;
	msg.address_lo = lower_32_bits(msg_addr);
	msg.data = hwirq;	/* hwirq doubles as the MSI data payload */

	pci_write_msi_msg(irq, &msg);

	return 0;
}
/*
 * msi_controller.teardown_irq: undo mtk_pcie_msi_setup_irq() — dispose
 * the domain mapping and return the hwirq to the port's pool.
 */
static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct pci_dev *pdev = to_pci_dev(chip->dev);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct mtk_pcie_port *port;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return;

	irq_dispose_mapping(irq);
	mtk_pcie_msi_free(port, hwirq);
}
/* Legacy msi_controller glue for the v2 hosts. */
static struct msi_controller mtk_pcie_msi_chip = {
	.setup_irq = mtk_pcie_msi_setup_irq,
	.teardown_irq = mtk_msi_teardown_irq,
};

/* irq_chip backing each mapped MSI; defers masking to the PCI MSI core. */
static struct irq_chip mtk_msi_irq_chip = {
	.name = "MTK PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* irq_domain .map callback: wire a virq to the MSI irq_chip. */
static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = mtk_pcie_msi_map,
};
  452. static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
  453. {
  454. u32 val;
  455. phys_addr_t msg_addr;
  456. msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
  457. val = lower_32_bits(msg_addr);
  458. writel(val, port->base + PCIE_IMSI_ADDR);
  459. val = readl(port->base + PCIE_INT_MASK);
  460. val &= ~MSI_MASK;
  461. writel(val, port->base + PCIE_INT_MASK);
  462. }
/* irq_domain .map callback for legacy INTx lines. */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	/* dummy_irq_chip: INTx mask/unmask is handled at the port level */
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
  473. static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
  474. struct device_node *node)
  475. {
  476. struct device *dev = port->pcie->dev;
  477. struct device_node *pcie_intc_node;
  478. /* Setup INTx */
  479. pcie_intc_node = of_get_next_child(node, NULL);
  480. if (!pcie_intc_node) {
  481. dev_err(dev, "no PCIe Intc node found\n");
  482. return -ENODEV;
  483. }
  484. port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
  485. &intx_domain_ops, port);
  486. if (!port->irq_domain) {
  487. dev_err(dev, "failed to get INTx IRQ domain\n");
  488. return -ENODEV;
  489. }
  490. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  491. port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
  492. &msi_domain_ops,
  493. &mtk_pcie_msi_chip);
  494. if (!port->msi_domain) {
  495. dev_err(dev, "failed to create MSI IRQ domain\n");
  496. return -ENODEV;
  497. }
  498. mtk_pcie_enable_msi(port);
  499. }
  500. return 0;
  501. }
/*
 * Shared interrupt handler for one port: dispatches latched INTx lines,
 * then (when MSI is enabled) drains the per-vector MSI status register.
 */
static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
{
	struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;	/* INTx status bits live at 19:16 */

	/* Re-read status until every latched INTx has been serviced. */
	while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx (write-1-to-clear) */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* MSI_STATUS is a summary bit over PCIE_IMSI_STATUS. */
		while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
			unsigned long imsi_status;

			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				/* `bit` restarts at 0 here (for_each_set_bit) */
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					/* Clear the MSI (write-1-to-clear) */
					writel(1 << bit, port->base + PCIE_IMSI_STATUS);
					virq = irq_find_mapping(port->msi_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	return IRQ_HANDLED;
}
  534. static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
  535. struct device_node *node)
  536. {
  537. struct mtk_pcie *pcie = port->pcie;
  538. struct device *dev = pcie->dev;
  539. struct platform_device *pdev = to_platform_device(dev);
  540. int err, irq;
  541. irq = platform_get_irq(pdev, port->slot);
  542. err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
  543. IRQF_SHARED, "mtk-pcie", port);
  544. if (err) {
  545. dev_err(dev, "unable to request IRQ %d\n", irq);
  546. return err;
  547. }
  548. err = mtk_pcie_init_irq_domain(port, node);
  549. if (err) {
  550. dev_err(dev, "failed to init PCIe IRQ domain\n");
  551. return err;
  552. }
  553. return 0;
  554. }
/*
 * pci_ops.map_bus for v1 hosts: latch the target (bus, devfn, register)
 * into the PCIE_CFG_ADDR window, then return the byte-aligned address
 * inside PCIE_CFG_DATA for the generic accessors to read/write.
 */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}
/* Config accessors for v1 hosts (address/data window + generic helpers). */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
/*
 * Bring up one v1 port: toggle PERST#, wait for link training, enable
 * the port interrupt and program BAR0 / class code / FC credit / FTS.
 *
 * Config-space fields are accessed through the PCIE_CFG_ADDR/DATA
 * indirection window; each read-modify-write re-latches the address
 * before the data write, so the statement order is significant.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	/* re-latch the address before writing back through the window */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	/* NOTE(review): PCIE_FTS_NUM_L0(0x50) evaluates to 0 — see macro */
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
/*
 * Power up one port: enable its clocks (in dependency order), pulse the
 * reset line, init/power the PHY, then run the SoC startup hook.  On any
 * failure — including link-down from startup() — the goto chain unwinds
 * exactly what was acquired and frees the port.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the (optional) per-port reset line. */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	/* SoC-specific link bring-up; 0 means the link trained. */
	if (!pcie->soc->startup(port))
		return;

	/* Link never came up: fall through into the full unwind below. */
	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
  686. static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
  687. struct device_node *node,
  688. int slot)
  689. {
  690. struct mtk_pcie_port *port;
  691. struct resource *regs;
  692. struct device *dev = pcie->dev;
  693. struct platform_device *pdev = to_platform_device(dev);
  694. char name[10];
  695. int err;
  696. port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
  697. if (!port)
  698. return -ENOMEM;
  699. err = of_property_read_u32(node, "num-lanes", &port->lane);
  700. if (err) {
  701. dev_err(dev, "missing num-lanes property\n");
  702. return err;
  703. }
  704. snprintf(name, sizeof(name), "port%d", slot);
  705. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  706. port->base = devm_ioremap_resource(dev, regs);
  707. if (IS_ERR(port->base)) {
  708. dev_err(dev, "failed to map port%d base\n", slot);
  709. return PTR_ERR(port->base);
  710. }
  711. snprintf(name, sizeof(name), "sys_ck%d", slot);
  712. port->sys_ck = devm_clk_get(dev, name);
  713. if (IS_ERR(port->sys_ck)) {
  714. dev_err(dev, "failed to get sys_ck%d clock\n", slot);
  715. return PTR_ERR(port->sys_ck);
  716. }
  717. /* sys_ck might be divided into the following parts in some chips */
  718. snprintf(name, sizeof(name), "ahb_ck%d", slot);
  719. port->ahb_ck = devm_clk_get(dev, name);
  720. if (IS_ERR(port->ahb_ck)) {
  721. if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
  722. return -EPROBE_DEFER;
  723. port->ahb_ck = NULL;
  724. }
  725. snprintf(name, sizeof(name), "axi_ck%d", slot);
  726. port->axi_ck = devm_clk_get(dev, name);
  727. if (IS_ERR(port->axi_ck)) {
  728. if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
  729. return -EPROBE_DEFER;
  730. port->axi_ck = NULL;
  731. }
  732. snprintf(name, sizeof(name), "aux_ck%d", slot);
  733. port->aux_ck = devm_clk_get(dev, name);
  734. if (IS_ERR(port->aux_ck)) {
  735. if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
  736. return -EPROBE_DEFER;
  737. port->aux_ck = NULL;
  738. }
  739. snprintf(name, sizeof(name), "obff_ck%d", slot);
  740. port->obff_ck = devm_clk_get(dev, name);
  741. if (IS_ERR(port->obff_ck)) {
  742. if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
  743. return -EPROBE_DEFER;
  744. port->obff_ck = NULL;
  745. }
  746. snprintf(name, sizeof(name), "pipe_ck%d", slot);
  747. port->pipe_ck = devm_clk_get(dev, name);
  748. if (IS_ERR(port->pipe_ck)) {
  749. if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
  750. return -EPROBE_DEFER;
  751. port->pipe_ck = NULL;
  752. }
  753. snprintf(name, sizeof(name), "pcie-rst%d", slot);
  754. port->reset = devm_reset_control_get_optional_exclusive(dev, name);
  755. if (PTR_ERR(port->reset) == -EPROBE_DEFER)
  756. return PTR_ERR(port->reset);
  757. /* some platforms may use default PHY setting */
  758. snprintf(name, sizeof(name), "pcie-phy%d", slot);
  759. port->phy = devm_phy_optional_get(dev, name);
  760. if (IS_ERR(port->phy))
  761. return PTR_ERR(port->phy);
  762. port->slot = slot;
  763. port->pcie = pcie;
  764. if (pcie->soc->setup_irq) {
  765. err = pcie->soc->setup_irq(port, node);
  766. if (err)
  767. return err;
  768. }
  769. INIT_LIST_HEAD(&port->list);
  770. list_add_tail(&port->list, &pcie->ports);
  771. return 0;
  772. }
  773. static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
  774. {
  775. struct device *dev = pcie->dev;
  776. struct platform_device *pdev = to_platform_device(dev);
  777. struct resource *regs;
  778. int err;
  779. /* get shared registers, which are optional */
  780. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
  781. if (regs) {
  782. pcie->base = devm_ioremap_resource(dev, regs);
  783. if (IS_ERR(pcie->base)) {
  784. dev_err(dev, "failed to map shared register\n");
  785. return PTR_ERR(pcie->base);
  786. }
  787. }
  788. pcie->free_ck = devm_clk_get(dev, "free_ck");
  789. if (IS_ERR(pcie->free_ck)) {
  790. if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
  791. return -EPROBE_DEFER;
  792. pcie->free_ck = NULL;
  793. }
  794. if (dev->pm_domain) {
  795. pm_runtime_enable(dev);
  796. pm_runtime_get_sync(dev);
  797. }
  798. /* enable top level clock */
  799. err = clk_prepare_enable(pcie->free_ck);
  800. if (err) {
  801. dev_err(dev, "failed to enable free_ck\n");
  802. goto err_free_ck;
  803. }
  804. return 0;
  805. err_free_ck:
  806. if (dev->pm_domain) {
  807. pm_runtime_put_sync(dev);
  808. pm_runtime_disable(dev);
  809. }
  810. return err;
  811. }
  812. static int mtk_pcie_setup(struct mtk_pcie *pcie)
  813. {
  814. struct device *dev = pcie->dev;
  815. struct device_node *node = dev->of_node, *child;
  816. struct of_pci_range_parser parser;
  817. struct of_pci_range range;
  818. struct resource res;
  819. struct mtk_pcie_port *port, *tmp;
  820. int err;
  821. if (of_pci_range_parser_init(&parser, node)) {
  822. dev_err(dev, "missing \"ranges\" property\n");
  823. return -EINVAL;
  824. }
  825. for_each_of_pci_range(&parser, &range) {
  826. err = of_pci_range_to_resource(&range, node, &res);
  827. if (err < 0)
  828. return err;
  829. switch (res.flags & IORESOURCE_TYPE_BITS) {
  830. case IORESOURCE_IO:
  831. pcie->offset.io = res.start - range.pci_addr;
  832. memcpy(&pcie->pio, &res, sizeof(res));
  833. pcie->pio.name = node->full_name;
  834. pcie->io.start = range.cpu_addr;
  835. pcie->io.end = range.cpu_addr + range.size - 1;
  836. pcie->io.flags = IORESOURCE_MEM;
  837. pcie->io.name = "I/O";
  838. memcpy(&res, &pcie->io, sizeof(res));
  839. break;
  840. case IORESOURCE_MEM:
  841. pcie->offset.mem = res.start - range.pci_addr;
  842. memcpy(&pcie->mem, &res, sizeof(res));
  843. pcie->mem.name = "non-prefetchable";
  844. break;
  845. }
  846. }
  847. err = of_pci_parse_bus_range(node, &pcie->busn);
  848. if (err < 0) {
  849. dev_err(dev, "failed to parse bus ranges property: %d\n", err);
  850. pcie->busn.name = node->name;
  851. pcie->busn.start = 0;
  852. pcie->busn.end = 0xff;
  853. pcie->busn.flags = IORESOURCE_BUS;
  854. }
  855. for_each_available_child_of_node(node, child) {
  856. int slot;
  857. err = of_pci_get_devfn(child);
  858. if (err < 0) {
  859. dev_err(dev, "failed to parse devfn: %d\n", err);
  860. return err;
  861. }
  862. slot = PCI_SLOT(err);
  863. err = mtk_pcie_parse_port(pcie, child, slot);
  864. if (err)
  865. return err;
  866. }
  867. err = mtk_pcie_subsys_powerup(pcie);
  868. if (err)
  869. return err;
  870. /* enable each port, and then check link status */
  871. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  872. mtk_pcie_enable_port(port);
  873. /* power down PCIe subsys if slots are all empty (link down) */
  874. if (list_empty(&pcie->ports))
  875. mtk_pcie_subsys_powerdown(pcie);
  876. return 0;
  877. }
  878. static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
  879. {
  880. struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
  881. struct list_head *windows = &host->windows;
  882. struct device *dev = pcie->dev;
  883. int err;
  884. pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
  885. pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
  886. pci_add_resource(windows, &pcie->busn);
  887. err = devm_request_pci_bus_resources(dev, windows);
  888. if (err < 0)
  889. return err;
  890. pci_remap_iospace(&pcie->pio, pcie->io.start);
  891. return 0;
  892. }
  893. static int mtk_pcie_register_host(struct pci_host_bridge *host)
  894. {
  895. struct mtk_pcie *pcie = pci_host_bridge_priv(host);
  896. struct pci_bus *child;
  897. int err;
  898. host->busnr = pcie->busn.start;
  899. host->dev.parent = pcie->dev;
  900. host->ops = pcie->soc->ops;
  901. host->map_irq = of_irq_parse_and_map_pci;
  902. host->swizzle_irq = pci_common_swizzle;
  903. host->sysdata = pcie;
  904. if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
  905. host->msi = &mtk_pcie_msi_chip;
  906. err = pci_scan_root_bus_bridge(host);
  907. if (err < 0)
  908. return err;
  909. pci_bus_size_bridges(host->bus);
  910. pci_bus_assign_resources(host->bus);
  911. list_for_each_entry(child, &host->bus->children, node)
  912. pcie_bus_configure_settings(child);
  913. pci_bus_add_devices(host->bus);
  914. return 0;
  915. }
  916. static int mtk_pcie_probe(struct platform_device *pdev)
  917. {
  918. struct device *dev = &pdev->dev;
  919. struct mtk_pcie *pcie;
  920. struct pci_host_bridge *host;
  921. int err;
  922. host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
  923. if (!host)
  924. return -ENOMEM;
  925. pcie = pci_host_bridge_priv(host);
  926. pcie->dev = dev;
  927. pcie->soc = of_device_get_match_data(dev);
  928. platform_set_drvdata(pdev, pcie);
  929. INIT_LIST_HEAD(&pcie->ports);
  930. err = mtk_pcie_setup(pcie);
  931. if (err)
  932. return err;
  933. err = mtk_pcie_request_resources(pcie);
  934. if (err)
  935. goto put_resources;
  936. err = mtk_pcie_register_host(host);
  937. if (err)
  938. goto put_resources;
  939. return 0;
  940. put_resources:
  941. if (!list_empty(&pcie->ports))
  942. mtk_pcie_put_resources(pcie);
  943. return err;
  944. }
/*
 * First-generation controller data (MT2701/MT7623): v1 config accessors
 * and port startup. has_msi is left false, so mtk_pcie_register_host()
 * does not attach the MSI controller for these SoCs.
 */
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};
/*
 * Second-generation controller data (MT2712/MT7622): v2 config accessors
 * and startup, built-in MSI support, plus a per-port IRQ setup hook used
 * by mtk_pcie_parse_port().
 */
static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
	.has_msi = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};
/* DT match table: MT2701/MT7623 use the v1 SoC data, MT2712/MT7622 the v2 */
static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
	{},
};
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		/* no .remove callback is provided, so forbid sysfs unbind */
		.suppress_bind_attrs = true,
	},
};
/* built-in only: registers at device_initcall time, no module exit path */
builtin_platform_driver(mtk_pcie_driver);