// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
#define TYPE0 (0 << 8)
#define TYPE1 (1 << 8)
#define PCIECDR 0x000020
#define PCIEMSR 0x000028
#define PCIEINTXR 0x000400
#define PCIEMSITXR 0x000840

/* Transfer control */
#define PCIETCTLR 0x02000
#define CFINIT 1
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE 1
#define PCIEERRFR 0x02020
#define UNSUPPORTED_REQUEST (1 << 4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
#define MSIFE 1
#define PCIEMSIAUR 0x0204c
#define PCIEMSIIER 0x02050

/* root port address */
#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x) (0x02200 + ((x) * 0x20))
#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
#define LAM_PREFETCH (1 << 3)
#define LAM_64BIT (1 << 2)
#define LAR_ENABLE (1 << 1)

/* PCIe address reg & mask */
#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
#define PAR_ENABLE (1 << 31)
#define IO_SPACE (1 << 8)

/* Configuration */
#define PCICONF(x) (0x010000 + ((x) * 0x4))
#define PMCAP(x) (0x010040 + ((x) * 0x4))
#define EXPCAP(x) (0x010070 + ((x) * 0x4))
#define VCCAP(x) (0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
#define SPCHGFIN (1 << 4)
#define SPCHGFAIL (1 << 6)
#define SPCHGSUC (1 << 7)
#define LINK_SPEED (0xf << 16)
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
#define SPEED_CHANGE (1 << 24)
#define SCRAMBLE_DISABLE (1 << 27)
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN (1 << 31)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
#define WRITE_CMD (1 << 16)
#define PHY_ACK (1 << 24)
#define RATE_POS 12
#define LANE_POS 8
#define ADR_POS 0
#define H1_PCIEPHYDOUTR 0x040014
#define H1_PCIEPHYSR 0x040018

/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR 0x780
#define GEN2_PCIEPHYDATA 0x784
#define GEN2_PCIEPHYCTRL 0x78c

#define INT_PCI_MSI_NR 32

#define RCONF(x) (PCICONF(0) + (x))
#define RPMCAP(x) (PMCAP(0) + (x))
#define REXPCAP(x) (EXPCAP(0) + (x))
#define RVCCAP(x) (VCCAP(0) + (x))

#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)

#define RCAR_PCI_MAX_RESOURCES 4
#define MAX_NR_INBOUND_MAPS 6

struct rcar_msi {
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
        struct irq_domain *domain;
        struct msi_controller chip;
        unsigned long pages;
        struct mutex lock;
        int irq1;
        int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
        return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie {
        struct device *dev;
        void __iomem *base;
        struct list_head resources;
        int root_bus_nr;
        struct clk *clk;
        struct clk *bus_clk;
        struct rcar_msi msi;
};

static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
                               unsigned long reg)
{
        writel(val, pcie->base + reg);
}

static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
                                       unsigned long reg)
{
        return readl(pcie->base + reg);
}

enum {
        RCAR_PCI_ACCESS_READ,
        RCAR_PCI_ACCESS_WRITE,
};
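
/*
 * The controller only exposes 32-bit register accesses, so sub-word
 * configuration fields are updated with a read-modify-write of the
 * aligned 32-bit word containing "where".
 */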
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
        int shift = 8 * (where & 3);
        u32 val = rcar_pci_read_reg(pcie, where & ~3);

        val &= ~(mask << shift);
        val |= data << shift;
        rcar_pci_write_reg(pcie, val, where & ~3);
}

static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
        int shift = 8 * (where & 3);
        u32 val = rcar_pci_read_reg(pcie, where & ~3);

        return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
                unsigned char access_type, struct pci_bus *bus,
                unsigned int devfn, int where, u32 *data)
{
        int dev, func, reg, index;

        dev = PCI_SLOT(devfn);
        func = PCI_FUNC(devfn);
        reg = where & ~3;
        index = reg / 4;

        /*
         * While each channel has its own memory-mapped extended config
         * space, it's generally only accessible when in endpoint mode.
         * When in root complex mode, the controller is unable to target
         * itself with either type 0 or type 1 accesses, and indeed, any
         * controller-initiated target transfer to its own config space
         * results in a completer abort.
         *
         * Each channel effectively only supports a single device, but as
         * the same channel <-> device access works for any PCI_SLOT()
         * value, we cheat a bit here and bind the controller's config
         * space to devfn 0 in order to enable self-enumeration. In this
         * case the regular ECAR/ECDR path is sidelined and the mangled
         * config access itself is initiated as an internal bus transaction.
         */
        if (pci_is_root_bus(bus)) {
                if (dev != 0)
                        return PCIBIOS_DEVICE_NOT_FOUND;

                if (access_type == RCAR_PCI_ACCESS_READ) {
                        *data = rcar_pci_read_reg(pcie, PCICONF(index));
                } else {
                        /* Keep an eye out for changes to the root bus number */
                        if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
                                pcie->root_bus_nr = *data & 0xff;

                        rcar_pci_write_reg(pcie, *data, PCICONF(index));
                }

                return PCIBIOS_SUCCESSFUL;
        }

        if (pcie->root_bus_nr < 0)
                return PCIBIOS_DEVICE_NOT_FOUND;

        /* Clear errors */
        rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

        /* Set the PIO address */
        rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
                PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

        /* Enable the configuration access */
        if (bus->parent->number == pcie->root_bus_nr)
                rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
        else
                rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

        /* Check for errors */
        if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
                return PCIBIOS_DEVICE_NOT_FOUND;

        /* Check for master and target aborts */
        if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
                (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (access_type == RCAR_PCI_ACCESS_READ)
                *data = rcar_pci_read_reg(pcie, PCIECDR);
        else
                rcar_pci_write_reg(pcie, *data, PCIECDR);

        /* Disable the configuration access */
        rcar_pci_write_reg(pcie, 0, PCIECCTLR);

        return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 *val)
{
        struct rcar_pcie *pcie = bus->sysdata;
        int ret;

        ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
                                      bus, devfn, where, val);
        if (ret != PCIBIOS_SUCCESSFUL) {
                *val = 0xffffffff;
                return ret;
        }

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 2))) & 0xffff;

        dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
                bus->number, devfn, where, size, (unsigned long)*val);

        return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 val)
{
        struct rcar_pcie *pcie = bus->sysdata;
        int shift, ret;
        u32 data;

        ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
                                      bus, devfn, where, &data);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
                bus->number, devfn, where, size, (unsigned long)val);

        if (size == 1) {
                shift = 8 * (where & 3);
                data &= ~(0xff << shift);
                data |= ((val & 0xff) << shift);
        } else if (size == 2) {
                shift = 8 * (where & 2);
                data &= ~(0xffff << shift);
                data |= ((val & 0xffff) << shift);
        } else
                data = val;

        ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
                                      bus, devfn, where, &data);

        return ret;
}

static struct pci_ops rcar_pcie_ops = {
        .read = rcar_pcie_read_conf,
        .write = rcar_pcie_write_conf,
};

static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
                                   struct resource *res)
{
        /* Setup PCIe address space mappings for each resource */
        resource_size_t size;
        resource_size_t res_start;
        u32 mask;

        rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));

        /*
         * The PAMR mask is calculated in units of 128Bytes, which
         * keeps things pretty simple.
         */
        size = resource_size(res);
        mask = (roundup_pow_of_two(size) / SZ_128) - 1;
        rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));

        if (res->flags & IORESOURCE_IO)
                res_start = pci_pio_to_address(res->start);
        else
                res_start = res->start;

        rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
        rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
                           PCIEPALR(win));

        /* First resource is for IO */
        mask = PAR_ENABLE;
        if (res->flags & IORESOURCE_IO)
                mask |= IO_SPACE;

        rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}

static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
{
        struct resource_entry *win;
        int i = 0;

        /* Setup PCI resources */
        resource_list_for_each_entry(win, &pci->resources) {
                struct resource *res = win->res;

                if (!res->flags)
                        continue;

                switch (resource_type(res)) {
                case IORESOURCE_IO:
                case IORESOURCE_MEM:
                        rcar_pcie_setup_window(i, pci, res);
                        i++;
                        break;
                case IORESOURCE_BUS:
                        pci->root_bus_nr = res->start;
                        break;
                default:
                        continue;
                }

                pci_add_resource(resource, res);
        }

        return 1;
}
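
/*
 * If the hardware advertises 5 GT/s capability in MACS2R, request a link
 * speed change and poll MACSR until the retrain completes. Failure here
 * is not fatal; the link simply stays at 2.5 GT/s.
 */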
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
        struct device *dev = pcie->dev;
        unsigned int timeout = 1000;
        u32 macsr;

        if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
                return;

        if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
                dev_err(dev, "Speed change already in progress\n");
                return;
        }

        macsr = rcar_pci_read_reg(pcie, MACSR);
        if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
                goto done;

        /* Set target link speed to 5.0 GT/s */
        rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
                   PCI_EXP_LNKSTA_CLS_5_0GB);

        /* Set speed change reason as intentional factor */
        rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

        /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
        if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
                rcar_pci_write_reg(pcie, macsr, MACSR);

        /* Start link speed change */
        rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

        while (timeout--) {
                macsr = rcar_pci_read_reg(pcie, MACSR);
                if (macsr & SPCHGFIN) {
                        /* Clear the interrupt bits */
                        rcar_pci_write_reg(pcie, macsr, MACSR);

                        if (macsr & SPCHGFAIL)
                                dev_err(dev, "Speed change failed\n");

                        goto done;
                }

                msleep(1);
        }

        dev_err(dev, "Speed change timed out\n");

done:
        dev_info(dev, "Current link speed is %s GT/s\n",
                 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}

static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
        struct pci_bus *bus, *child;
        int ret;

        /* Try setting 5 GT/s link speed */
        rcar_pcie_force_speedup(pcie);

        rcar_pcie_setup(&bridge->windows, pcie);

        pci_add_flags(PCI_REASSIGN_ALL_BUS);

        bridge->dev.parent = dev;
        bridge->sysdata = pcie;
        bridge->busnr = pcie->root_bus_nr;
        bridge->ops = &rcar_pcie_ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;
        if (IS_ENABLED(CONFIG_PCI_MSI))
                bridge->msi = &pcie->msi.chip;

        ret = pci_scan_root_bus_bridge(bridge);
        if (ret < 0)
                return ret;

        bus = bridge->bus;

        pci_bus_size_bridges(bus);
        pci_bus_assign_resources(bus);

        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);

        pci_bus_add_devices(bus);

        return 0;
}

static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
        struct device *dev = pcie->dev;
        unsigned int timeout = 100;

        while (timeout--) {
                if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
                        return 0;

                udelay(100);
        }

        dev_err(dev, "Access to PCIe phy timed out\n");

        return -ETIMEDOUT;
}
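
/*
 * The R-Car H1 PHY is programmed indirectly: the data word is placed in
 * H1_PCIEPHYDOUTR and the rate/lane/address command in H1_PCIEPHYADRR,
 * then the PHY signals completion via the PHY_ACK bit.
 */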
static void phy_write_reg(struct rcar_pcie *pcie,
                          unsigned int rate, unsigned int addr,
                          unsigned int lane, unsigned int data)
{
        unsigned long phyaddr;

        phyaddr = WRITE_CMD |
                ((rate & 1) << RATE_POS) |
                ((lane & 0xf) << LANE_POS) |
                ((addr & 0xff) << ADR_POS);

        /* Set write data */
        rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
        rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

        /* Ignore errors as they will be dealt with if the data link is down */
        phy_wait_for_ack(pcie);

        /* Clear command */
        rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
        rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

        /* Ignore errors as they will be dealt with if the data link is down */
        phy_wait_for_ack(pcie);
}

static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
        unsigned int timeout = 10;

        while (timeout--) {
                if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
                        return 0;

                msleep(5);
        }

        return -ETIMEDOUT;
}
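
/*
 * Common controller initialization: select root complex mode, set up the
 * root port configuration header and capabilities, then set CFINIT and
 * wait for the data link layer to come up.
 */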
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
        int err;

        /* Begin initialization */
        rcar_pci_write_reg(pcie, 0, PCIETCTLR);

        /* Set mode */
        rcar_pci_write_reg(pcie, 1, PCIEMSR);

        /*
         * Initial header for port config space is type 1, set the device
         * class to match. Hardware takes care of propagating the IDSETR
         * settings, so there is no need to bother with a quirk.
         */
        rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

        /*
         * Setup Secondary Bus Number & Subordinate Bus Number, even though
         * they aren't used, to avoid bridge being detected as broken.
         */
        rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
        rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

        /* Initialize default capabilities. */
        rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
        rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
                   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
        rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
                   PCI_HEADER_TYPE_BRIDGE);

        /* Enable data link layer active state reporting */
        rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
                   PCI_EXP_LNKCAP_DLLLARC);

        /* Write out the physical slot number = 0 */
        rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

        /* Set the completion timer timeout to the maximum 50ms. */
        rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

        /* Terminate list of capabilities (Next Capability Offset=0) */
        rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

        /* Enable MSI */
        if (IS_ENABLED(CONFIG_PCI_MSI))
                rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

        /* Finish initialization - establish a PCI Express link */
        rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

        /* This will timeout if we don't have a link. */
        err = rcar_pcie_wait_for_dl(pcie);
        if (err)
                return err;

        /* Enable INTx interrupts */
        rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

        wmb();

        return 0;
}
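
/*
 * R-Car H1 (r8a7779) keeps its PCIe PHY behind the indirect access
 * registers above. The values written below are the opaque per-SoC
 * initialization settings; once the PHY reports ready in H1_PCIEPHYSR,
 * the common initialization runs.
 */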
static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
{
        unsigned int timeout = 10;

        /* Initialize the phy */
        phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
        phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
        phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
        phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
        phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
        phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
        phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
        phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
        phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
        phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
        phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
        phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
        phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
        phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
        phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

        while (timeout--) {
                if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
                        return rcar_pcie_hw_init(pcie);

                msleep(5);
        }

        return -ETIMEDOUT;
}

static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
{
        /*
         * These settings come from the R-Car Series, 2nd Generation User's
         * Manual, section 50.3.1 (2) Initialization of the physical layer.
         */
        rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
        rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
        rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
        rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

        rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
        /* The following value is for DC connection, no termination resistor */
        rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
        rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
        rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

        return rcar_pcie_hw_init(pcie);
}
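
/*
 * MSI vectors are tracked in a simple bitmap: rcar_msi_alloc() hands out a
 * single hardware vector, rcar_msi_alloc_region() a power-of-two aligned
 * block for multi-MSI, and rcar_msi_free() returns a vector to the pool.
 */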
static int rcar_msi_alloc(struct rcar_msi *chip)
{
        int msi;

        mutex_lock(&chip->lock);

        msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
        if (msi < INT_PCI_MSI_NR)
                set_bit(msi, chip->used);
        else
                msi = -ENOSPC;

        mutex_unlock(&chip->lock);

        return msi;
}

static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
        int msi;

        mutex_lock(&chip->lock);
        msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
                                      order_base_2(no_irqs));
        mutex_unlock(&chip->lock);

        return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
        mutex_lock(&chip->lock);
        clear_bit(irq, chip->used);
        mutex_unlock(&chip->lock);
}
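
/*
 * Shared handler for the controller interrupts: PCIEMSIFR latches the
 * pending MSI vectors, and each set bit is cleared and dispatched to its
 * mapped Linux interrupt. INTx is signalled on the same lines but not
 * handled here, hence IRQ_NONE when no MSI bit is set.
 */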
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
        struct rcar_pcie *pcie = data;
        struct rcar_msi *msi = &pcie->msi;
        struct device *dev = pcie->dev;
        unsigned long reg;

        reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

        /* MSI & INTx share an interrupt - we only handle MSI here */
        if (!reg)
                return IRQ_NONE;

        while (reg) {
                unsigned int index = find_first_bit(&reg, 32);
                unsigned int irq;

                /* clear the interrupt */
                rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

                irq = irq_find_mapping(msi->domain, index);
                if (irq) {
                        if (test_bit(index, msi->used))
                                generic_handle_irq(irq);
                        else
                                dev_info(dev, "unhandled MSI\n");
                } else {
                        /* Unknown MSI, just clear it */
                        dev_dbg(dev, "unexpected MSI\n");
                }

                /* see if there's any more pending in this vector */
                reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
        }

        return IRQ_HANDLED;
}

static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
                              struct msi_desc *desc)
{
        struct rcar_msi *msi = to_rcar_msi(chip);
        struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
        struct msi_msg msg;
        unsigned int irq;
        int hwirq;

        hwirq = rcar_msi_alloc(msi);
        if (hwirq < 0)
                return hwirq;

        irq = irq_find_mapping(msi->domain, hwirq);
        if (!irq) {
                rcar_msi_free(msi, hwirq);
                return -EINVAL;
        }

        irq_set_msi_desc(irq, desc);

        msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
        msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
        msg.data = hwirq;

        pci_write_msi_msg(irq, &msg);

        return 0;
}

static int rcar_msi_setup_irqs(struct msi_controller *chip,
                               struct pci_dev *pdev, int nvec, int type)
{
        struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
        struct rcar_msi *msi = to_rcar_msi(chip);
        struct msi_desc *desc;
        struct msi_msg msg;
        unsigned int irq;
        int hwirq;
        int i;

        /* MSI-X interrupts are not supported */
        if (type == PCI_CAP_ID_MSIX)
                return -EINVAL;

        WARN_ON(!list_is_singular(&pdev->dev.msi_list));
        desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

        hwirq = rcar_msi_alloc_region(msi, nvec);
        if (hwirq < 0)
                return -ENOSPC;

        irq = irq_find_mapping(msi->domain, hwirq);
        if (!irq)
                return -ENOSPC;

        for (i = 0; i < nvec; i++) {
                /*
                 * irq_create_mapping() called from rcar_pcie_probe() pre-
                 * allocates descs, so there is no need to allocate descs here.
                 * We can therefore assume that if irq_find_mapping() above
                 * returns non-zero, then the descs are also successfully
                 * allocated.
                 */
                if (irq_set_msi_desc_off(irq, i, desc)) {
                        /* TODO: clear */
                        return -EINVAL;
                }
        }

        desc->nvec_used = nvec;
        desc->msi_attrib.multiple = order_base_2(nvec);

        msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
        msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
        msg.data = hwirq;

        pci_write_msi_msg(irq, &msg);

        return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
        struct rcar_msi *msi = to_rcar_msi(chip);
        struct irq_data *d = irq_get_irq_data(irq);

        rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
        .name = "R-Car PCIe MSI",
        .irq_enable = pci_msi_unmask_irq,
        .irq_disable = pci_msi_mask_irq,
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
                        irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
        .map = rcar_msi_map,
};
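
/*
 * The MSI target is an otherwise unused page of system RAM whose physical
 * address is programmed into PCIEMSIALR/PCIEMSIAUR; inbound writes to it
 * are detected by the controller and latched in PCIEMSIFR. Both controller
 * interrupt lines are requested since either may signal MSIs.
 */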
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct rcar_msi *msi = &pcie->msi;
        unsigned long base;
        int err, i;

        mutex_init(&msi->lock);

        msi->chip.dev = dev;
        msi->chip.setup_irq = rcar_msi_setup_irq;
        msi->chip.setup_irqs = rcar_msi_setup_irqs;
        msi->chip.teardown_irq = rcar_msi_teardown_irq;

        msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
                                            &msi_domain_ops, &msi->chip);
        if (!msi->domain) {
                dev_err(dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        for (i = 0; i < INT_PCI_MSI_NR; i++)
                irq_create_mapping(msi->domain, i);

        /* Two irqs are for MSI, but they are also used for non-MSI irqs */
        err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
                               IRQF_SHARED | IRQF_NO_THREAD,
                               rcar_msi_irq_chip.name, pcie);
        if (err < 0) {
                dev_err(dev, "failed to request IRQ: %d\n", err);
                goto err;
        }

        err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
                               IRQF_SHARED | IRQF_NO_THREAD,
                               rcar_msi_irq_chip.name, pcie);
        if (err < 0) {
                dev_err(dev, "failed to request IRQ: %d\n", err);
                goto err;
        }

        /* setup MSI data target */
        msi->pages = __get_free_pages(GFP_KERNEL, 0);
        base = virt_to_phys((void *)msi->pages);
        rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
        rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);

        /* enable all MSI interrupts */
        rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

        return 0;

err:
        irq_domain_remove(msi->domain);
        return err;
}

static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct resource res;
        int err, i;

        err = of_address_to_resource(dev->of_node, 0, &res);
        if (err)
                return err;

        pcie->base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(pcie->base))
                return PTR_ERR(pcie->base);

        pcie->clk = devm_clk_get(dev, "pcie");
        if (IS_ERR(pcie->clk)) {
                dev_err(dev, "cannot get platform clock\n");
                return PTR_ERR(pcie->clk);
        }
        err = clk_prepare_enable(pcie->clk);
        if (err)
                return err;

        pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
        if (IS_ERR(pcie->bus_clk)) {
                dev_err(dev, "cannot get pcie bus clock\n");
                err = PTR_ERR(pcie->bus_clk);
                goto fail_clk;
        }
        err = clk_prepare_enable(pcie->bus_clk);
        if (err)
                goto fail_clk;

        i = irq_of_parse_and_map(dev->of_node, 0);
        if (!i) {
                dev_err(dev, "cannot get platform resources for msi interrupt\n");
                err = -ENOENT;
                goto err_map_reg;
        }
        pcie->msi.irq1 = i;

        i = irq_of_parse_and_map(dev->of_node, 1);
        if (!i) {
                dev_err(dev, "cannot get platform resources for msi interrupt\n");
                err = -ENOENT;
                goto err_map_reg;
        }
        pcie->msi.irq2 = i;

        return 0;

err_map_reg:
        clk_disable_unprepare(pcie->bus_clk);
fail_clk:
        clk_disable_unprepare(pcie->clk);

        return err;
}
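
/*
 * Program the inbound (PCIe-to-CPU) address translation windows from a
 * dma-ranges entry. A range whose size exceeds the alignment of its start
 * address is split across several PRAR/LAR/LAMR register pairs, since
 * each window mask must be a power of two.
 */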
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
                                    struct of_pci_range *range,
                                    int *index)
{
        u64 restype = range->flags;
        u64 cpu_addr = range->cpu_addr;
        u64 cpu_end = range->cpu_addr + range->size;
        u64 pci_addr = range->pci_addr;
        u32 flags = LAM_64BIT | LAR_ENABLE;
        u64 mask;
        u64 size;
        int idx = *index;

        if (restype & IORESOURCE_PREFETCH)
                flags |= LAM_PREFETCH;

        /*
         * If the size of the range is larger than the alignment of the start
         * address, we have to use multiple entries to perform the mapping.
         */
        if (cpu_addr > 0) {
                unsigned long nr_zeros = __ffs64(cpu_addr);
                u64 alignment = 1ULL << nr_zeros;

                size = min(range->size, alignment);
        } else {
                size = range->size;
        }
        /* Hardware supports max 4GiB inbound region */
        size = min(size, 1ULL << 32);

        mask = roundup_pow_of_two(size) - 1;
        mask &= ~0xf;

        while (cpu_addr < cpu_end) {
                /*
                 * Set up 64-bit inbound regions as the range parser doesn't
                 * distinguish between 32 and 64-bit types.
                 */
                rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
                                   PCIEPRAR(idx));
                rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
                rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
                                   PCIELAMR(idx));

                rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
                                   PCIEPRAR(idx + 1));
                rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
                                   PCIELAR(idx + 1));
                rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

                pci_addr += size;
                cpu_addr += size;
                idx += 2;

                if (idx > MAX_NR_INBOUND_MAPS) {
                        dev_err(pcie->dev, "Failed to map inbound regions!\n");
                        return -EINVAL;
                }
        }
        *index = idx;

        return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
                                          struct device_node *np)
{
        struct of_pci_range range;
        struct of_pci_range_parser parser;
        int index = 0;
        int err;

        if (of_pci_dma_range_parser_init(&parser, np))
                return -EINVAL;

        /* Get the dma-ranges from DT */
        for_each_of_pci_range(&parser, &range) {
                u64 end = range.cpu_addr + range.size - 1;

                dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
                        range.flags, range.cpu_addr, end, range.pci_addr);

                err = rcar_pcie_inbound_ranges(pcie, &range, &index);
                if (err)
                        return err;
        }

        return 0;
}

static const struct of_device_id rcar_pcie_of_match[] = {
        { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
        { .compatible = "renesas,pcie-r8a7790",
          .data = rcar_pcie_hw_init_gen2 },
        { .compatible = "renesas,pcie-r8a7791",
          .data = rcar_pcie_hw_init_gen2 },
        { .compatible = "renesas,pcie-rcar-gen2",
          .data = rcar_pcie_hw_init_gen2 },
        { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
        { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
        {},
};

static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
        int err;
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        resource_size_t iobase;
        struct resource_entry *win, *tmp;

        err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
                                               &iobase);
        if (err)
                return err;

        err = devm_request_pci_bus_resources(dev, &pci->resources);
        if (err)
                goto out_release_res;

        resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
                struct resource *res = win->res;

                if (resource_type(res) == IORESOURCE_IO) {
                        err = pci_remap_iospace(res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);

                                resource_list_destroy_entry(win);
                        }
                }
        }

        return 0;

out_release_res:
        pci_free_resource_list(&pci->resources);
        return err;
}

static int rcar_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rcar_pcie *pcie;
        unsigned int data;
        int err;
        int (*hw_init_fn)(struct rcar_pcie *);
        struct pci_host_bridge *bridge;

        bridge = pci_alloc_host_bridge(sizeof(*pcie));
        if (!bridge)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(bridge);

        pcie->dev = dev;

        INIT_LIST_HEAD(&pcie->resources);

        err = rcar_pcie_parse_request_of_pci_ranges(pcie);
        if (err)
                goto err_free_bridge;

        err = rcar_pcie_get_resources(pcie);
        if (err < 0) {
                dev_err(dev, "failed to request resources: %d\n", err);
                goto err_free_resource_list;
        }

        err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
        if (err)
                goto err_free_resource_list;

        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                dev_err(dev, "pm_runtime_get_sync failed\n");
                goto err_pm_disable;
        }

        /* Failure to get a link might just be that no cards are inserted */
        hw_init_fn = of_device_get_match_data(dev);
        err = hw_init_fn(pcie);
        if (err) {
                dev_info(dev, "PCIe link down\n");
                err = -ENODEV;
                goto err_pm_put;
        }

        data = rcar_pci_read_reg(pcie, MACSR);
        dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                err = rcar_pcie_enable_msi(pcie);
                if (err < 0) {
                        dev_err(dev,
                                "failed to enable MSI support: %d\n",
                                err);
                        goto err_pm_put;
                }
        }

        err = rcar_pcie_enable(pcie);
        if (err)
                goto err_pm_put;

        return 0;

err_pm_put:
        pm_runtime_put(dev);

err_pm_disable:
        pm_runtime_disable(dev);

err_free_resource_list:
        pci_free_resource_list(&pcie->resources);

err_free_bridge:
        pci_free_host_bridge(bridge);

        return err;
}

static struct platform_driver rcar_pcie_driver = {
        .driver = {
                .name = "rcar-pcie",
                .of_match_table = rcar_pcie_of_match,
                .suppress_bind_attrs = true,
        },
        .probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);