pcie-mediatek.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MediaTek PCIe host controller driver.
  4. *
  5. * Copyright (c) 2017 MediaTek Inc.
  6. * Author: Ryder Lee <ryder.lee@mediatek.com>
  7. * Honghui Zhang <honghui.zhang@mediatek.com>
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/iopoll.h>
  12. #include <linux/irq.h>
  13. #include <linux/irqchip/chained_irq.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/kernel.h>
  16. #include <linux/msi.h>
  17. #include <linux/module.h>
  18. #include <linux/of_address.h>
  19. #include <linux/of_pci.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/pci.h>
  22. #include <linux/phy/phy.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/reset.h>
  26. #include "../pci.h"
  27. /* PCIe shared registers */
  28. #define PCIE_SYS_CFG 0x00
  29. #define PCIE_INT_ENABLE 0x0c
  30. #define PCIE_CFG_ADDR 0x20
  31. #define PCIE_CFG_DATA 0x24
  32. /* PCIe per port registers */
  33. #define PCIE_BAR0_SETUP 0x10
  34. #define PCIE_CLASS 0x34
  35. #define PCIE_LINK_STATUS 0x50
  36. #define PCIE_PORT_INT_EN(x) BIT(20 + (x))
  37. #define PCIE_PORT_PERST(x) BIT(1 + (x))
  38. #define PCIE_PORT_LINKUP BIT(0)
  39. #define PCIE_BAR_MAP_MAX GENMASK(31, 16)
  40. #define PCIE_BAR_ENABLE BIT(0)
  41. #define PCIE_REVISION_ID BIT(0)
  42. #define PCIE_CLASS_CODE (0x60400 << 8)
  43. #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
  44. ((((regn) >> 8) & GENMASK(3, 0)) << 24))
  45. #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
  46. #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
  47. #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
  48. #define PCIE_CONF_ADDR(regn, fun, dev, bus) \
  49. (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
  50. PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
  51. /* MediaTek specific configuration registers */
  52. #define PCIE_FTS_NUM 0x70c
  53. #define PCIE_FTS_NUM_MASK GENMASK(15, 8)
  54. #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
  55. #define PCIE_FC_CREDIT 0x73c
  56. #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
  57. #define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
  58. /* PCIe V2 share registers */
  59. #define PCIE_SYS_CFG_V2 0x0
  60. #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
  61. #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
  62. /* PCIe V2 per-port registers */
  63. #define PCIE_MSI_VECTOR 0x0c0
  64. #define PCIE_CONF_VEND_ID 0x100
  65. #define PCIE_CONF_CLASS_ID 0x106
  66. #define PCIE_INT_MASK 0x420
  67. #define INTX_MASK GENMASK(19, 16)
  68. #define INTX_SHIFT 16
  69. #define PCIE_INT_STATUS 0x424
  70. #define MSI_STATUS BIT(23)
  71. #define PCIE_IMSI_STATUS 0x42c
  72. #define PCIE_IMSI_ADDR 0x430
  73. #define MSI_MASK BIT(23)
  74. #define MTK_MSI_IRQS_NUM 32
  75. #define PCIE_AHB_TRANS_BASE0_L 0x438
  76. #define PCIE_AHB_TRANS_BASE0_H 0x43c
  77. #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
  78. #define PCIE_AXI_WINDOW0 0x448
  79. #define WIN_ENABLE BIT(7)
  80. /* PCIe V2 configuration transaction header */
  81. #define PCIE_CFG_HEADER0 0x460
  82. #define PCIE_CFG_HEADER1 0x464
  83. #define PCIE_CFG_HEADER2 0x468
  84. #define PCIE_CFG_WDATA 0x470
  85. #define PCIE_APP_TLP_REQ 0x488
  86. #define PCIE_CFG_RDATA 0x48c
  87. #define APP_CFG_REQ BIT(0)
  88. #define APP_CPL_STATUS GENMASK(7, 5)
  89. #define CFG_WRRD_TYPE_0 4
  90. #define CFG_WR_FMT 2
  91. #define CFG_RD_FMT 0
  92. #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
  93. #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
  94. #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
  95. #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
  96. #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
  97. #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
  98. #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
  99. #define CFG_HEADER_DW0(type, fmt) \
  100. (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
  101. #define CFG_HEADER_DW1(where, size) \
  102. (GENMASK(((size) - 1), 0) << ((where) & 0x3))
  103. #define CFG_HEADER_DW2(regn, fun, dev, bus) \
  104. (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
  105. CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
  106. #define PCIE_RST_CTRL 0x510
  107. #define PCIE_PHY_RSTB BIT(0)
  108. #define PCIE_PIPE_SRSTB BIT(1)
  109. #define PCIE_MAC_SRSTB BIT(2)
  110. #define PCIE_CRSTB BIT(3)
  111. #define PCIE_PERSTB BIT(8)
  112. #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
  113. #define PCIE_LINK_STATUS_V2 0x804
  114. #define PCIE_PORT_LINKUP_V2 BIT(10)
  115. struct mtk_pcie_port;
/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};
/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};
  202. static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
  203. {
  204. struct device *dev = pcie->dev;
  205. clk_disable_unprepare(pcie->free_ck);
  206. pm_runtime_put_sync(dev);
  207. pm_runtime_disable(dev);
  208. }
  209. static void mtk_pcie_port_free(struct mtk_pcie_port *port)
  210. {
  211. struct mtk_pcie *pcie = port->pcie;
  212. struct device *dev = pcie->dev;
  213. devm_iounmap(dev, port->base);
  214. list_del(&port->list);
  215. devm_kfree(dev, port);
  216. }
/*
 * Release all per-port resources and power down the host bridge.
 *
 * Teardown mirrors mtk_pcie_enable_port() in reverse: PHY off/exit first,
 * then the clocks (pipe -> obff -> axi -> aux -> ahb -> sys), then the port
 * itself.  The _safe iterator is required because mtk_pcie_port_free()
 * deletes the current entry from the list.
 */
static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}
  233. static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
  234. {
  235. u32 val;
  236. int err;
  237. err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
  238. !(val & APP_CFG_REQ), 10,
  239. 100 * USEC_PER_MSEC);
  240. if (err)
  241. return PCIBIOS_SET_FAILED;
  242. if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
  243. return PCIBIOS_SET_FAILED;
  244. return PCIBIOS_SUCCESSFUL;
  245. }
/*
 * Perform a configuration read on a PCIe v2 host by hand-crafting a type-0
 * Cfgrd TLP through the per-port CFG_HEADER/APP_TLP_REQ registers.
 *
 * The completion payload is always a full 32-bit word; for 1- and 2-byte
 * accesses the requested bytes are shifted down and masked out of it.
 * Register write order (headers first, then the request trigger) is part of
 * the hardware protocol.
 */
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	/* Extract the addressed sub-word for narrow accesses. */
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Perform a configuration write on a PCIe v2 host by hand-crafting a type-0
 * Cfgwr TLP.  The write data must be positioned at the correct byte lane
 * within the 32-bit payload word; the byte enables come from
 * CFG_HEADER_DW1(where, size).
 */
static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data, shifted to the addressed byte lane */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}
  290. static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
  291. unsigned int devfn)
  292. {
  293. struct mtk_pcie *pcie = bus->sysdata;
  294. struct mtk_pcie_port *port;
  295. struct pci_dev *dev = NULL;
  296. /*
  297. * Walk the bus hierarchy to get the devfn value
  298. * of the port in the root bus.
  299. */
  300. while (bus && bus->number) {
  301. dev = bus->self;
  302. bus = dev->bus;
  303. devfn = dev->devfn;
  304. }
  305. list_for_each_entry(port, &pcie->ports, list)
  306. if (port->slot == PCI_SLOT(devfn))
  307. return port;
  308. return NULL;
  309. }
  310. static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
  311. int where, int size, u32 *val)
  312. {
  313. struct mtk_pcie_port *port;
  314. u32 bn = bus->number;
  315. int ret;
  316. port = mtk_pcie_find_port(bus, devfn);
  317. if (!port) {
  318. *val = ~0;
  319. return PCIBIOS_DEVICE_NOT_FOUND;
  320. }
  321. ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
  322. if (ret)
  323. *val = ~0;
  324. return ret;
  325. }
  326. static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
  327. int where, int size, u32 val)
  328. {
  329. struct mtk_pcie_port *port;
  330. u32 bn = bus->number;
  331. port = mtk_pcie_find_port(bus, devfn);
  332. if (!port)
  333. return PCIBIOS_DEVICE_NOT_FOUND;
  334. return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
  335. }
/* Config space accessors for PCIe v2 hosts (TLP-based, no shadow window). */
static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
/*
 * Compose the MSI message for a vector: the doorbell is the physical
 * address of the port's PCIE_MSI_VECTOR register, the payload is the
 * hardware vector number.
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
/* MSI routing is fixed in hardware; affinity changes are not supported. */
static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
  357. static void mtk_msi_ack_irq(struct irq_data *data)
  358. {
  359. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
  360. u32 hwirq = data->hwirq;
  361. writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
  362. }
/* Bottom-level (inner domain) irq_chip driving the port's MSI hardware. */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_msi_set_affinity,
	.irq_ack		= mtk_msi_ack_irq,
};
  369. static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  370. unsigned int nr_irqs, void *args)
  371. {
  372. struct mtk_pcie_port *port = domain->host_data;
  373. unsigned long bit;
  374. WARN_ON(nr_irqs != 1);
  375. mutex_lock(&port->lock);
  376. bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
  377. if (bit >= MTK_MSI_IRQS_NUM) {
  378. mutex_unlock(&port->lock);
  379. return -ENOSPC;
  380. }
  381. __set_bit(bit, port->msi_irq_in_use);
  382. mutex_unlock(&port->lock);
  383. irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
  384. domain->host_data, handle_edge_irq,
  385. NULL, NULL);
  386. return 0;
  387. }
/*
 * Return an MSI vector to the port's bitmap.  Freeing a vector that was
 * never allocated is reported but otherwise tolerated.
 */
static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);

	mutex_lock(&port->lock);

	if (!test_bit(d->hwirq, port->msi_irq_in_use))
		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
/* Allocation ops for the inner (vector-level) MSI IRQ domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mtk_pcie_irq_domain_alloc,
	.free	= mtk_pcie_irq_domain_free,
};
/* Top-level irq_chip exposed to the PCI/MSI layer; delegates ack downward. */
static struct irq_chip mtk_msi_irq_chip = {
	.name		= "MTK PCIe MSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};
/* PCI/MSI domain description: default ops/chip handling, MSI-X capable. */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mtk_msi_irq_chip,
};
  417. static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
  418. {
  419. struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
  420. mutex_init(&port->lock);
  421. port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
  422. &msi_domain_ops, port);
  423. if (!port->inner_domain) {
  424. dev_err(port->pcie->dev, "failed to create IRQ domain\n");
  425. return -ENOMEM;
  426. }
  427. port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
  428. port->inner_domain);
  429. if (!port->msi_domain) {
  430. dev_err(port->pcie->dev, "failed to create MSI domain\n");
  431. irq_domain_remove(port->inner_domain);
  432. return -ENOMEM;
  433. }
  434. return 0;
  435. }
/*
 * Program the MSI doorbell address (the physical address of the port's
 * PCIE_MSI_VECTOR register) and unmask the MSI summary interrupt.
 */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	/* Clearing MSI_MASK in PCIE_INT_MASK unmasks the MSI interrupt. */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}
/*
 * Undo mtk_pcie_setup_irq() for every port: detach the chained handler,
 * remove the INTx and (if configured) MSI domains, then dispose of the
 * GIC interrupt mapping.
 */
static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		irq_set_chained_handler_and_data(port->irq, NULL, NULL);

		if (port->irq_domain)
			irq_domain_remove(port->irq_domain);

		if (IS_ENABLED(CONFIG_PCI_MSI)) {
			if (port->msi_domain)
				irq_domain_remove(port->msi_domain);
			if (port->inner_domain)
				irq_domain_remove(port->inner_domain);
		}

		irq_dispose_mapping(port->irq);
	}
}
  463. static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
  464. irq_hw_number_t hwirq)
  465. {
  466. irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
  467. irq_set_chip_data(irq, domain->host_data);
  468. return 0;
  469. }
/* Mapping ops for the legacy INTx IRQ domain. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
  473. static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
  474. struct device_node *node)
  475. {
  476. struct device *dev = port->pcie->dev;
  477. struct device_node *pcie_intc_node;
  478. int ret;
  479. /* Setup INTx */
  480. pcie_intc_node = of_get_next_child(node, NULL);
  481. if (!pcie_intc_node) {
  482. dev_err(dev, "no PCIe Intc node found\n");
  483. return -ENODEV;
  484. }
  485. port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
  486. &intx_domain_ops, port);
  487. if (!port->irq_domain) {
  488. dev_err(dev, "failed to get INTx IRQ domain\n");
  489. return -ENODEV;
  490. }
  491. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  492. ret = mtk_pcie_allocate_msi_domains(port);
  493. if (ret)
  494. return ret;
  495. }
  496. return 0;
  497. }
/*
 * Chained handler for a port's summary interrupt.  Demultiplexes INTx
 * (status bits 16..19) and, when MSI support is built in, the per-vector
 * MSI status register.
 *
 * INTx bits are cleared (write-1-to-clear) before dispatch; the MSI
 * summary bit is cleared only after the per-vector status has drained.
 */
static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;	/* start scanning at the first INTx bit */

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS) {
			unsigned long imsi_status;

			/* Re-read until no further vectors are pending. */
			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					virq = irq_find_mapping(port->inner_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	chained_irq_exit(irqchip, desc);

	return;
}
  532. static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
  533. struct device_node *node)
  534. {
  535. struct mtk_pcie *pcie = port->pcie;
  536. struct device *dev = pcie->dev;
  537. struct platform_device *pdev = to_platform_device(dev);
  538. int err;
  539. err = mtk_pcie_init_irq_domain(port, node);
  540. if (err) {
  541. dev_err(dev, "failed to init PCIe IRQ domain\n");
  542. return err;
  543. }
  544. port->irq = platform_get_irq(pdev, port->slot);
  545. irq_set_chained_handler_and_data(port->irq,
  546. mtk_pcie_intr_handler, port);
  547. return 0;
  548. }
/*
 * Bring up a PCIe v2 port: enable LTSSM/ASPM (MT7622), sequence the reset
 * bits, optionally fix up the bridge IDs, wait for link training, unmask
 * INTx/MSI, and program the address translation windows.
 *
 * Returns 0 on link up, -ETIMEDOUT if training does not complete.
 * The reset-register write sequence (assert all, enable linkdown reset,
 * then de-assert) is order-sensitive.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct resource *mem = &pcie->mem;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	size_t size;
	int err;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/*
	 * Set AHB to PCIe translation windows.
	 * NOTE(review): size is end - start (one less than resource_size());
	 * fls(size) then encodes the window order.  For a power-of-two-sized
	 * window this yields the exact order; confirm this is the intended
	 * encoding for non-power-of-two windows.
	 */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
/*
 * pci_ops.map_bus for v1 hosts: latch the target (bus, devfn, register)
 * into the shared CFG_ADDR window, then return the CFG_DATA address
 * (offset by the low register bits for sub-word accesses).
 */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}
/* Config space accessors for v1 hosts, via the shared CFG_ADDR/CFG_DATA window. */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
/*
 * Bring up a v1 port: toggle PERST#, wait for link training, enable the
 * port interrupt, set up BAR0 DDR mapping, program class/revision, and
 * tune FC credits and FTS count through the CFG_ADDR/CFG_DATA window.
 *
 * Returns 0 on link up, -ETIMEDOUT otherwise.  Each CFG_DATA access must
 * be preceded by a CFG_ADDR write selecting the target register (hence
 * the repeated CFG_ADDR writes around each read-modify-write).
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
/*
 * Enable a port: bring up its clocks (sys -> ahb -> aux -> axi -> obff ->
 * pipe), pulse the reset line, initialize and power on the PHY, then run
 * the SoC-specific startup.  On any failure — including link-down from
 * startup() — everything acquired so far is unwound via the goto ladder
 * and the port is freed, so the caller never sees a half-enabled port.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the port reset before touching the PHY. */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	/* SoC-specific startup returning 0 means the link came up. */
	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
  738. static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
  739. struct device_node *node,
  740. int slot)
  741. {
  742. struct mtk_pcie_port *port;
  743. struct resource *regs;
  744. struct device *dev = pcie->dev;
  745. struct platform_device *pdev = to_platform_device(dev);
  746. char name[10];
  747. int err;
  748. port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
  749. if (!port)
  750. return -ENOMEM;
  751. err = of_property_read_u32(node, "num-lanes", &port->lane);
  752. if (err) {
  753. dev_err(dev, "missing num-lanes property\n");
  754. return err;
  755. }
  756. snprintf(name, sizeof(name), "port%d", slot);
  757. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  758. port->base = devm_ioremap_resource(dev, regs);
  759. if (IS_ERR(port->base)) {
  760. dev_err(dev, "failed to map port%d base\n", slot);
  761. return PTR_ERR(port->base);
  762. }
  763. snprintf(name, sizeof(name), "sys_ck%d", slot);
  764. port->sys_ck = devm_clk_get(dev, name);
  765. if (IS_ERR(port->sys_ck)) {
  766. dev_err(dev, "failed to get sys_ck%d clock\n", slot);
  767. return PTR_ERR(port->sys_ck);
  768. }
  769. /* sys_ck might be divided into the following parts in some chips */
  770. snprintf(name, sizeof(name), "ahb_ck%d", slot);
  771. port->ahb_ck = devm_clk_get(dev, name);
  772. if (IS_ERR(port->ahb_ck)) {
  773. if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
  774. return -EPROBE_DEFER;
  775. port->ahb_ck = NULL;
  776. }
  777. snprintf(name, sizeof(name), "axi_ck%d", slot);
  778. port->axi_ck = devm_clk_get(dev, name);
  779. if (IS_ERR(port->axi_ck)) {
  780. if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
  781. return -EPROBE_DEFER;
  782. port->axi_ck = NULL;
  783. }
  784. snprintf(name, sizeof(name), "aux_ck%d", slot);
  785. port->aux_ck = devm_clk_get(dev, name);
  786. if (IS_ERR(port->aux_ck)) {
  787. if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
  788. return -EPROBE_DEFER;
  789. port->aux_ck = NULL;
  790. }
  791. snprintf(name, sizeof(name), "obff_ck%d", slot);
  792. port->obff_ck = devm_clk_get(dev, name);
  793. if (IS_ERR(port->obff_ck)) {
  794. if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
  795. return -EPROBE_DEFER;
  796. port->obff_ck = NULL;
  797. }
  798. snprintf(name, sizeof(name), "pipe_ck%d", slot);
  799. port->pipe_ck = devm_clk_get(dev, name);
  800. if (IS_ERR(port->pipe_ck)) {
  801. if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
  802. return -EPROBE_DEFER;
  803. port->pipe_ck = NULL;
  804. }
  805. snprintf(name, sizeof(name), "pcie-rst%d", slot);
  806. port->reset = devm_reset_control_get_optional_exclusive(dev, name);
  807. if (PTR_ERR(port->reset) == -EPROBE_DEFER)
  808. return PTR_ERR(port->reset);
  809. /* some platforms may use default PHY setting */
  810. snprintf(name, sizeof(name), "pcie-phy%d", slot);
  811. port->phy = devm_phy_optional_get(dev, name);
  812. if (IS_ERR(port->phy))
  813. return PTR_ERR(port->phy);
  814. port->slot = slot;
  815. port->pcie = pcie;
  816. if (pcie->soc->setup_irq) {
  817. err = pcie->soc->setup_irq(port, node);
  818. if (err)
  819. return err;
  820. }
  821. INIT_LIST_HEAD(&port->list);
  822. list_add_tail(&port->list, &pcie->ports);
  823. return 0;
  824. }
  825. static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
  826. {
  827. struct device *dev = pcie->dev;
  828. struct platform_device *pdev = to_platform_device(dev);
  829. struct resource *regs;
  830. int err;
  831. /* get shared registers, which are optional */
  832. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
  833. if (regs) {
  834. pcie->base = devm_ioremap_resource(dev, regs);
  835. if (IS_ERR(pcie->base)) {
  836. dev_err(dev, "failed to map shared register\n");
  837. return PTR_ERR(pcie->base);
  838. }
  839. }
  840. pcie->free_ck = devm_clk_get(dev, "free_ck");
  841. if (IS_ERR(pcie->free_ck)) {
  842. if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
  843. return -EPROBE_DEFER;
  844. pcie->free_ck = NULL;
  845. }
  846. pm_runtime_enable(dev);
  847. pm_runtime_get_sync(dev);
  848. /* enable top level clock */
  849. err = clk_prepare_enable(pcie->free_ck);
  850. if (err) {
  851. dev_err(dev, "failed to enable free_ck\n");
  852. goto err_free_ck;
  853. }
  854. return 0;
  855. err_free_ck:
  856. pm_runtime_put_sync(dev);
  857. pm_runtime_disable(dev);
  858. return err;
  859. }
  860. static int mtk_pcie_setup(struct mtk_pcie *pcie)
  861. {
  862. struct device *dev = pcie->dev;
  863. struct device_node *node = dev->of_node, *child;
  864. struct of_pci_range_parser parser;
  865. struct of_pci_range range;
  866. struct resource res;
  867. struct mtk_pcie_port *port, *tmp;
  868. int err;
  869. if (of_pci_range_parser_init(&parser, node)) {
  870. dev_err(dev, "missing \"ranges\" property\n");
  871. return -EINVAL;
  872. }
  873. for_each_of_pci_range(&parser, &range) {
  874. err = of_pci_range_to_resource(&range, node, &res);
  875. if (err < 0)
  876. return err;
  877. switch (res.flags & IORESOURCE_TYPE_BITS) {
  878. case IORESOURCE_IO:
  879. pcie->offset.io = res.start - range.pci_addr;
  880. memcpy(&pcie->pio, &res, sizeof(res));
  881. pcie->pio.name = node->full_name;
  882. pcie->io.start = range.cpu_addr;
  883. pcie->io.end = range.cpu_addr + range.size - 1;
  884. pcie->io.flags = IORESOURCE_MEM;
  885. pcie->io.name = "I/O";
  886. memcpy(&res, &pcie->io, sizeof(res));
  887. break;
  888. case IORESOURCE_MEM:
  889. pcie->offset.mem = res.start - range.pci_addr;
  890. memcpy(&pcie->mem, &res, sizeof(res));
  891. pcie->mem.name = "non-prefetchable";
  892. break;
  893. }
  894. }
  895. err = of_pci_parse_bus_range(node, &pcie->busn);
  896. if (err < 0) {
  897. dev_err(dev, "failed to parse bus ranges property: %d\n", err);
  898. pcie->busn.name = node->name;
  899. pcie->busn.start = 0;
  900. pcie->busn.end = 0xff;
  901. pcie->busn.flags = IORESOURCE_BUS;
  902. }
  903. for_each_available_child_of_node(node, child) {
  904. int slot;
  905. err = of_pci_get_devfn(child);
  906. if (err < 0) {
  907. dev_err(dev, "failed to parse devfn: %d\n", err);
  908. return err;
  909. }
  910. slot = PCI_SLOT(err);
  911. err = mtk_pcie_parse_port(pcie, child, slot);
  912. if (err)
  913. return err;
  914. }
  915. err = mtk_pcie_subsys_powerup(pcie);
  916. if (err)
  917. return err;
  918. /* enable each port, and then check link status */
  919. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  920. mtk_pcie_enable_port(port);
  921. /* power down PCIe subsys if slots are all empty (link down) */
  922. if (list_empty(&pcie->ports))
  923. mtk_pcie_subsys_powerdown(pcie);
  924. return 0;
  925. }
  926. static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
  927. {
  928. struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
  929. struct list_head *windows = &host->windows;
  930. struct device *dev = pcie->dev;
  931. int err;
  932. pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
  933. pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
  934. pci_add_resource(windows, &pcie->busn);
  935. err = devm_request_pci_bus_resources(dev, windows);
  936. if (err < 0)
  937. return err;
  938. err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
  939. if (err)
  940. return err;
  941. return 0;
  942. }
  943. static int mtk_pcie_probe(struct platform_device *pdev)
  944. {
  945. struct device *dev = &pdev->dev;
  946. struct mtk_pcie *pcie;
  947. struct pci_host_bridge *host;
  948. int err;
  949. host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
  950. if (!host)
  951. return -ENOMEM;
  952. pcie = pci_host_bridge_priv(host);
  953. pcie->dev = dev;
  954. pcie->soc = of_device_get_match_data(dev);
  955. platform_set_drvdata(pdev, pcie);
  956. INIT_LIST_HEAD(&pcie->ports);
  957. err = mtk_pcie_setup(pcie);
  958. if (err)
  959. return err;
  960. err = mtk_pcie_request_resources(pcie);
  961. if (err)
  962. goto put_resources;
  963. host->busnr = pcie->busn.start;
  964. host->dev.parent = pcie->dev;
  965. host->ops = pcie->soc->ops;
  966. host->map_irq = of_irq_parse_and_map_pci;
  967. host->swizzle_irq = pci_common_swizzle;
  968. host->sysdata = pcie;
  969. err = pci_host_probe(host);
  970. if (err)
  971. goto put_resources;
  972. return 0;
  973. put_resources:
  974. if (!list_empty(&pcie->ports))
  975. mtk_pcie_put_resources(pcie);
  976. return err;
  977. }
  978. static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
  979. {
  980. struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
  981. struct list_head *windows = &host->windows;
  982. pci_free_resource_list(windows);
  983. }
/*
 * mtk_pcie_remove() - driver removal.  Ordering matters: the PCI core's view
 * of the bus is torn down first, then driver-owned resources are released.
 */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	/* Detach and remove all devices below the root bus. */
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);

	/* Drop bridge windows, IRQ domains, then clocks/PHYs/runtime PM. */
	mtk_pcie_free_resources(pcie);

	mtk_pcie_irq_teardown(pcie);

	mtk_pcie_put_resources(pcie);

	return 0;
}
/*
 * mtk_pcie_suspend_noirq() - noirq suspend hook: gate every per-port clock
 * and power down the PHYs, then stop the shared free-running clock.
 *
 * The clock-disable sequence is the exact reverse of the enable order used
 * in mtk_pcie_enable_port(); do not reorder.
 */
static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_pcie *pcie = dev_get_drvdata(dev);
	struct mtk_pcie_port *port;

	/* Nothing was left powered if all slots came up link-down at probe. */
	if (list_empty(&pcie->ports))
		return 0;

	list_for_each_entry(port, &pcie->ports, list) {
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		phy_power_off(port->phy);
		phy_exit(port->phy);
	}

	/* Finally gate the shared top-level clock. */
	clk_disable_unprepare(pcie->free_ck);

	return 0;
}
  1014. static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
  1015. {
  1016. struct mtk_pcie *pcie = dev_get_drvdata(dev);
  1017. struct mtk_pcie_port *port, *tmp;
  1018. if (list_empty(&pcie->ports))
  1019. return 0;
  1020. clk_prepare_enable(pcie->free_ck);
  1021. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  1022. mtk_pcie_enable_port(port);
  1023. /* In case of EP was removed while system suspend. */
  1024. if (list_empty(&pcie->ports))
  1025. clk_disable_unprepare(pcie->free_ck);
  1026. return 0;
  1027. }
/* Suspend/resume run at the noirq phase so links recover before clients. */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};

/* First-generation controller (MT2701/MT7623): no per-port IRQ domain. */
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

/* Second-generation controller with its own MSI/INTx IRQ handling. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/* Like MT2712, but the hardware reports a wrong class ID that needs fixing. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		/* Unbinding a live host bridge is unsafe; disallow it. */
		.suppress_bind_attrs = true,
		.pm = &mtk_pcie_pm_ops,
	},
};
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");