pcie-rcar.c

/*
 * PCIe driver for Renesas R-Car SoCs
 * Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 * Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#define DRV_NAME "rcar-pcie"
#define PCIECAR			0x000010
#define PCIECCTLR		0x000018
#define  CONFIG_SEND_ENABLE	(1 << 31)
#define  TYPE0			(0 << 8)
#define  TYPE1			(1 << 8)
#define PCIECDR			0x000020
#define PCIEMSR			0x000028
#define PCIEINTXR		0x000400
#define PCIEMSITXR		0x000840

/* Transfer control */
#define PCIETCTLR		0x02000
#define  CFINIT			1
#define PCIETSTR		0x02004
#define  DATA_LINK_ACTIVE	1
#define PCIEERRFR		0x02020
#define  UNSUPPORTED_REQUEST	(1 << 4)
#define PCIEMSIFR		0x02044
#define PCIEMSIALR		0x02048
#define  MSIFE			1
#define PCIEMSIAUR		0x0204c
#define PCIEMSIIER		0x02050

/* root port address */
#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
#define  LAM_PREFETCH		(1 << 3)
#define  LAM_64BIT		(1 << 2)
#define  LAR_ENABLE		(1 << 1)

/* PCIe address reg & mask */
#define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
#define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
#define  PAR_ENABLE		(1 << 31)
#define  IO_SPACE		(1 << 8)

/* Configuration */
#define PCICONF(x)		(0x010000 + ((x) * 0x4))
#define PMCAP(x)		(0x010040 + ((x) * 0x4))
#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
#define VCCAP(x)		(0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1			0x011004
#define TLCTLR			0x011048
#define MACSR			0x011054
#define MACCTLR			0x011058
#define  SCRAMBLE_DISABLE	(1 << 27)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR		0x04000c
#define  WRITE_CMD		(1 << 16)
#define  PHY_ACK		(1 << 24)
#define  RATE_POS		12
#define  LANE_POS		8
#define  ADR_POS		0
#define H1_PCIEPHYDOUTR		0x040014
#define H1_PCIEPHYSR		0x040018

/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR	0x780
#define GEN2_PCIEPHYDATA	0x784
#define GEN2_PCIEPHYCTRL	0x78c

#define INT_PCI_MSI_NR		32

#define RCONF(x)		(PCICONF(0)+(x))
#define RPMCAP(x)		(PMCAP(0)+(x))
#define REXPCAP(x)		(EXPCAP(0)+(x))
#define RVCCAP(x)		(VCCAP(0)+(x))

#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)

#define RCAR_PCI_MAX_RESOURCES	4
#define MAX_NR_INBOUND_MAPS	6
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device *dev;
	void __iomem *base;
	struct list_head resources;
	int root_bus_nr;
	struct clk *clk;
	struct clk *bus_clk;
	struct rcar_msi msi;
};

static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
			       unsigned long reg)
{
	writel(val, pcie->base + reg);
}

static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
				       unsigned long reg)
{
	return readl(pcie->base + reg);
}

enum {
	RCAR_PCI_ACCESS_READ,
	RCAR_PCI_ACCESS_WRITE,
};
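
/*
 * The controller's configuration registers are only 32-bit accessible:
 * rcar_rmw32() read-modify-writes the byte/word field at 'where' within the
 * aligned 32-bit register, and rcar_read_conf() returns the register shifted
 * down to the requested byte offset.
 */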
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	val &= ~(mask << shift);
	val |= data << shift;
	rcar_pci_write_reg(pcie, val, where & ~3);
}

static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (bus->parent->number == pcie->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int shift, ret;
	u32 data;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);

	if (size == 1) {
		shift = 8 * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = 8 * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
				   struct resource *res)
{
	/* Setup PCIe address space mappings for each resource */
	resource_size_t size;
	resource_size_t res_start;
	u32 mask;

	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));

	/*
	 * The PAMR mask is calculated in units of 128Bytes, which
	 * keeps things pretty simple.
	 */
	size = resource_size(res);
	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));

	if (res->flags & IORESOURCE_IO)
		res_start = pci_pio_to_address(res->start);
	else
		res_start = res->start;

	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
	rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
			   PCIEPALR(win));

	/* First resource is for IO */
	mask = PAR_ENABLE;
	if (res->flags & IORESOURCE_IO)
		mask |= IO_SPACE;

	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
{
	struct resource_entry *win;
	int i = 0;

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_setup_window(i, pci, res);
			i++;
			break;
		case IORESOURCE_BUS:
			pci->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		pci_add_resource(resource, res);
	}

	return 1;
}
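
/*
 * Program the outbound windows, scan and enumerate the root bus (attaching
 * the MSI controller when CONFIG_PCI_MSI is enabled), then size, assign and
 * activate the resulting bus resources.
 */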
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
	struct pci_bus *bus, *child;
	LIST_HEAD(res);

	rcar_pcie_setup(&res, pcie);

	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
	else
		bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res);

	if (!bus) {
		dev_err(pcie->dev, "Scanning rootbus failed\n");
		return -ENODEV;
	}

	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);

	return 0;
}
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(pcie->dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}
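
/*
 * Write one R-Car H1 PHY register through the indirect command interface:
 * the data goes into H1_PCIEPHYDOUTR, the rate/lane/address command is then
 * issued via H1_PCIEPHYADRR, and the PHY_ACK handshake is awaited.
 */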
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}
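
/* Poll PCIETSTR for DATA_LINK_ACTIVE, giving the link roughly 50ms to train */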
static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	while (timeout--) {
		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
			return 0;

		msleep(5);
	}

	return -ETIMEDOUT;
}
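
/*
 * Common controller bring-up: set the operating mode, populate the root
 * port's config header and capabilities, start link training via CFINIT and
 * wait for the data link layer to become active before enabling INTx.
 */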
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}
static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
			return rcar_pcie_hw_init(pcie);

		msleep(5);
	}

	return -ETIMEDOUT;
}
static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
{
	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return rcar_pcie_hw_init(pcie);
}
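
/* Allocate the lowest free vector from the 32-entry MSI bitmap */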
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie *pcie = data;
	struct rcar_msi *msi = &pcie->msi;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		irq = irq_find_mapping(msi->domain, index);
		if (irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(irq);
			else
				dev_info(pcie->dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(pcie->dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}
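
/*
 * msi_controller .setup_irq hook: allocate a hardware vector, map it into the
 * MSI IRQ domain and compose the message from the PCIEMSIALR/PCIEMSIAUR
 * target address, using the vector number as the message data.
 */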
static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}
static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};
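
/*
 * Register the MSI controller: create the linear IRQ domain, request the two
 * SoC interrupt lines shared with INTx, point the hardware's MSI target
 * address (PCIEMSIALR/PCIEMSIAUR) at a freshly allocated page and unmask all
 * 32 vectors in PCIEMSIIER.
 */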
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct rcar_msi *msi = &pcie->msi;
	unsigned long base;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = pcie->dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}
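
/*
 * Acquire everything the controller needs from the device tree node: the
 * "pcie" and "pcie_bus" clocks, the two interrupt lines used for MSI/INTx,
 * and the mapped register window.
 */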
static int rcar_pcie_get_resources(struct platform_device *pdev,
				   struct rcar_pcie *pcie)
{
	struct resource res;
	int err, i;

	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
	if (err)
		return err;

	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(pcie->clk)) {
		dev_err(pcie->dev, "cannot get platform clock\n");
		return PTR_ERR(pcie->clk);
	}
	err = clk_prepare_enable(pcie->clk);
	if (err)
		goto fail_clk;

	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(pcie->dev, "cannot get pcie bus clock\n");
		err = PTR_ERR(pcie->bus_clk);
		goto fail_clk;
	}
	err = clk_prepare_enable(pcie->bus_clk);
	if (err)
		goto err_map_reg;

	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq2 = i;

	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
	if (IS_ERR(pcie->base)) {
		err = PTR_ERR(pcie->base);
		goto err_map_reg;
	}

	return 0;

err_map_reg:
	clk_disable_unprepare(pcie->bus_clk);
fail_clk:
	clk_disable_unprepare(pcie->clk);

	return err;
}
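
/*
 * Program inbound (PCI -> CPU) windows for one dma-ranges entry. Each window
 * consumes a PCIEPRAR/PCIELAR/PCIELAMR register pair for the lower and upper
 * 32 bits, so large or misaligned ranges may be split across several windows.
 */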
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct of_pci_range *range,
				    int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));

		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx + 1));
		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx + 1));
		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				     struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "dma-ranges", &rlen);
	if (!parser->range)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);
	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
					  struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err;

	if (pci_dma_range_parser_init(&parser, np))
		return -EINVAL;

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			return err;
	}

	return 0;
}
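
/*
 * The probe path below expects the matched DT node to provide the register
 * window (reg), the "pcie" and "pcie_bus" clocks, at least two interrupts
 * (indices 0 and 1, shared between MSI and INTx), and bus-range/ranges/
 * dma-ranges properties describing the host bridge windows; see the
 * resource-gathering helpers above for exactly how each is consumed.
 */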
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
	{ .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
	{},
};
static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
	int err;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win;

	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
					       &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &pci->resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *res = win->res;

		if (resource_type(res) == IORESOURCE_IO) {
			err = pci_remap_iospace(res, iobase);
			if (err)
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
		}
	}

	return 0;

out_release_res:
	pci_free_resource_list(&pci->resources);
	return err;
}
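
/*
 * Probe: gather DT resources, program the inbound windows, run the
 * SoC-specific hardware init to bring the link up, then enable MSI (when
 * configured) and enumerate the bus. A missing link is not treated as a
 * probe failure, since the slot may simply be empty.
 */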
static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct rcar_pcie *pcie;
	unsigned int data;
	const struct of_device_id *of_id;
	int err;
	int (*hw_init_fn)(struct rcar_pcie *);

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;
	platform_set_drvdata(pdev, pcie);

	INIT_LIST_HEAD(&pcie->resources);

	err = rcar_pcie_parse_request_of_pci_ranges(pcie);
	if (err)
		return err;

	err = rcar_pcie_get_resources(pdev, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
	if (err)
		return err;

	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
	if (!of_id || !of_id->data)
		return -EINVAL;
	hw_init_fn = of_id->data;

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	err = hw_init_fn(pcie);
	if (err) {
		dev_info(&pdev->dev, "PCIe link down\n");
		err = 0;
		goto err_pm_put;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_pm_put;
		}
	}

	err = rcar_pcie_enable(pcie);
	if (err)
		goto err_pm_put;

	return 0;

err_pm_put:
	pm_runtime_put(pcie->dev);

err_pm_disable:
	pm_runtime_disable(pcie->dev);

	return err;
}
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = rcar_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);