pcie-rcar.c

// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 * Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 * Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "../pci.h"

#define PCIECAR			0x000010
#define PCIECCTLR		0x000018
#define  CONFIG_SEND_ENABLE	BIT(31)
#define  TYPE0			(0 << 8)
#define  TYPE1			BIT(8)
#define PCIECDR			0x000020
#define PCIEMSR			0x000028
#define PCIEINTXR		0x000400
#define PCIEPHYSR		0x0007f0
#define  PHYRDY			BIT(0)
#define PCIEMSITXR		0x000840

/* Transfer control */
#define PCIETCTLR		0x02000
#define  DL_DOWN		BIT(3)
#define  CFINIT			1
#define PCIETSTR		0x02004
#define  DATA_LINK_ACTIVE	1
#define PCIEERRFR		0x02020
#define  UNSUPPORTED_REQUEST	BIT(4)
#define PCIEMSIFR		0x02044
#define PCIEMSIALR		0x02048
#define  MSIFE			1
#define PCIEMSIAUR		0x0204c
#define PCIEMSIIER		0x02050

/* root port address */
#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
#define  LAM_PREFETCH		BIT(3)
#define  LAM_64BIT		BIT(2)
#define  LAR_ENABLE		BIT(1)

/* PCIe address reg & mask */
#define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
#define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
#define  PAR_ENABLE		BIT(31)
#define  IO_SPACE		BIT(8)

/* Configuration */
#define PCICONF(x)		(0x010000 + ((x) * 0x4))
#define PMCAP(x)		(0x010040 + ((x) * 0x4))
#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
#define VCCAP(x)		(0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1			0x011004
#define TLCTLR			0x011048
#define MACSR			0x011054
#define  SPCHGFIN		BIT(4)
#define  SPCHGFAIL		BIT(6)
#define  SPCHGSUC		BIT(7)
#define  LINK_SPEED		(0xf << 16)
#define  LINK_SPEED_2_5GTS	(1 << 16)
#define  LINK_SPEED_5_0GTS	(2 << 16)
#define MACCTLR			0x011058
#define  SPEED_CHANGE		BIT(24)
#define  SCRAMBLE_DISABLE	BIT(27)
#define PMSR			0x01105c
#define MACS2R			0x011078
#define MACCGSPSETR		0x011084
#define  SPCNGRSN		BIT(31)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR		0x04000c
#define  WRITE_CMD		BIT(16)
#define  PHY_ACK		BIT(24)
#define  RATE_POS		12
#define  LANE_POS		8
#define  ADR_POS		0
#define H1_PCIEPHYDOUTR		0x040014

/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR	0x780
#define GEN2_PCIEPHYDATA	0x784
#define GEN2_PCIEPHYCTRL	0x78c

#define INT_PCI_MSI_NR		32

#define RCONF(x)		(PCICONF(0) + (x))
#define RPMCAP(x)		(PMCAP(0) + (x))
#define REXPCAP(x)		(EXPCAP(0) + (x))
#define RVCCAP(x)		(VCCAP(0) + (x))

#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)

#define RCAR_PCI_MAX_RESOURCES	4
#define MAX_NR_INBOUND_MAPS	6

struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device		*dev;
	struct phy		*phy;
	void __iomem		*base;
	struct list_head	resources;
	int			root_bus_nr;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
};

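/* Plain 32-bit MMIO accessors for the controller's register block */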
static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
			       unsigned long reg)
{
	writel(val, pcie->base + reg);
}

static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
				       unsigned long reg)
{
	return readl(pcie->base + reg);
}

enum {
	RCAR_PCI_ACCESS_READ,
	RCAR_PCI_ACCESS_WRITE,
};

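/*
 * Read-modify-write helper: update the field selected by 'mask' within the
 * 32-bit register containing byte offset 'where', leaving other bits intact.
 */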
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	val &= ~(mask << shift);
	val |= data << shift;
	rcar_pci_write_reg(pcie, val, where & ~3);
}

static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (bus->parent->number == pcie->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int shift, ret;
	u32 data;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);

	if (size == 1) {
		shift = 8 * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = 8 * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};

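/* Program one outbound window: CPU accesses to 'res' are forwarded to PCIe */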
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
				   struct resource *res)
{
	/* Setup PCIe address space mappings for each resource */
	resource_size_t size;
	resource_size_t res_start;
	u32 mask;

	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));

	/*
	 * The PAMR mask is calculated in units of 128Bytes, which
	 * keeps things pretty simple.
	 */
	size = resource_size(res);
	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));

	if (res->flags & IORESOURCE_IO)
		res_start = pci_pio_to_address(res->start);
	else
		res_start = res->start;

	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
	rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
			   PCIEPALR(win));

	/* First resource is for IO */
	mask = PAR_ENABLE;
	if (res->flags & IORESOURCE_IO)
		mask |= IO_SPACE;

	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}

static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
{
	struct resource_entry *win;
	int i = 0;

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_setup_window(i, pci, res);
			i++;
			break;
		case IORESOURCE_BUS:
			pci->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		pci_add_resource(resource, res);
	}

	return 1;
}

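/*
 * Try to raise the link from the initial 2.5 GT/s to 5 GT/s when the
 * hardware reports 5 GT/s support, and wait for the speed change to finish.
 */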
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}

static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	struct pci_bus *bus, *child;
	int ret;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	rcar_pcie_setup(&bridge->windows, pcie);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = pcie->root_bus_nr;
	bridge->ops = &rcar_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
	if (IS_ENABLED(CONFIG_PCI_MSI))
		bridge->msi = &pcie->msi.chip;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		return ret;

	bus = bridge->bus;

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);

	return 0;
}

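/* R-Car H1 (r8a7779) PHY access helpers */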
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}

static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}

static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
			return 0;

		msleep(5);
	}

	return -ETIMEDOUT;
}

static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10000;

	while (timeout--) {
		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
			return 0;

		udelay(5);
		cpu_relax();
	}

	return -ETIMEDOUT;
}

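/*
 * Bring up the controller: wait for the PHY, initialize the root port's
 * config space and start link training.
 */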
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		   PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
{
	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}

static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
{
	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}

static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
{
	int err;

	err = phy_init(pcie->phy);
	if (err)
		return err;

	err = phy_power_on(pcie->phy);
	if (err)
		phy_exit(pcie->phy);

	return err;
}

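/* Allocate a single MSI vector from the 32-entry bitmap */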
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
	int msi;

	mutex_lock(&chip->lock);
	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
				      order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}

static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie *pcie = data;
	struct rcar_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		irq = irq_find_mapping(msi->domain, index);
		if (irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(irq);
			else
				dev_info(dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}

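/*
 * msi_controller .setup_irq callback: allocate one vector, attach the MSI
 * descriptor to its Linux IRQ and compose a message that targets the
 * address programmed into PCIEMSIALR/PCIEMSIAUR.
 */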
static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static int rcar_msi_setup_irqs(struct msi_controller *chip,
			       struct pci_dev *pdev, int nvec, int type)
{
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct msi_desc *desc;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = rcar_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq)
		return -ENOSPC;

	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from rcar_pcie_probe() pre-
		 * allocates descs, so there is no need to allocate descs here.
		 * We can therefore assume that if irq_find_mapping() above
		 * returns non-zero, then the descs are also successfully
		 * allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			/* TODO: clear */
			return -EINVAL;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};

static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
{
	struct rcar_msi *msi = &pcie->msi;
	int i, irq;

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}

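/*
 * Set up MSI support: create the IRQ domain, request the two shared
 * MSI/INTx interrupts and point the hardware at a dedicated page that
 * serves as the MSI target address.
 */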
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &pcie->msi;
	phys_addr_t base;
	int err, i;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.setup_irqs = rcar_msi_setup_irqs;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	for (i = 0; i < INT_PCI_MSI_NR; i++)
		irq_create_mapping(msi->domain, i);

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	if (!msi->pages) {
		err = -ENOMEM;
		goto err;
	}
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

	return 0;

err:
	rcar_pcie_unmap_msi(pcie);
	return err;
}

static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
{
	struct rcar_msi *msi = &pcie->msi;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	free_pages(msi->pages, 0);

	rcar_pcie_unmap_msi(pcie);
}

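/* Fetch the optional PHY, register block, bus clock and both IRQs from DT */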
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(pcie->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	pcie->msi.irq2 = i;

	return 0;

err_irq2:
	irq_dispose_mapping(pcie->msi.irq1);
err_irq1:
	return err;
}

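/*
 * Program inbound (PCIe -> CPU) windows for one dma-ranges entry. Registers
 * are used in pairs for a 64-bit mapping, and a single entry may need
 * several windows when the CPU address is not sufficiently aligned.
 */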
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct of_pci_range *range,
				    int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
				   PCIEPRAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
				   PCIELAMR(idx));

		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
				   PCIEPRAR(idx + 1));
		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
				   PCIELAR(idx + 1));
		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
					  struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err;

	if (of_pci_dma_range_parser_init(&parser, np))
		return -EINVAL;

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			return err;
	}

	return 0;
}

static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie *pcie;
	unsigned int data;
	int err;
	int (*phy_init_fn)(struct rcar_pcie *);
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);

	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);

	err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
	if (err)
		goto err_free_bridge;

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(pcie->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
	if (err)
		goto err_clk_disable;

	phy_init_fn = of_device_get_match_data(dev);
	err = phy_init_fn(pcie);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(pcie);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(pcie);

err_phy_shutdown:
	if (pcie->phy) {
		phy_power_off(pcie->phy);
		phy_exit(pcie->phy);
	}

err_clk_disable:
	clk_disable_unprepare(pcie->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(pcie->msi.irq2);
	irq_dispose_mapping(pcie->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);
	pci_free_resource_list(&pcie->resources);

err_free_bridge:
	pci_free_host_bridge(bridge);

	return err;
}

static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie *pcie = dev_get_drvdata(dev);

	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}

static const struct dev_pm_ops rcar_pcie_pm_ops = {
	.resume_noirq = rcar_pcie_resume_noirq,
};

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);