pci-imx6.c

/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

struct imx6_pcie {
	int			reset_gpio;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie;
	struct pcie_port	pp;
	struct regmap		*iomuxc_gpr;
	void __iomem		*mem_base;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
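
/*
 * The PHY registers above are not memory-mapped; they are reached
 * indirectly through the PCIE_PHY_CTRL/PCIE_PHY_STAT handshake
 * implemented by the helpers below.  pcie_phy_poll_ack() polls the ack
 * flag in PCIE_PHY_STAT until it matches exp_val, giving up after ten
 * attempts spaced 1 us apart.
 */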
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}
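
/*
 * Latch a PHY register address into PCIE_PHY_CTRL (write the address,
 * pulse the "capture address" bit) and complete the ack handshake.
 */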
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}
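
/*
 * Write a 16-bit value to a PHY register: latch the address, capture the
 * data, then pulse the write strobe, completing the ack handshake at
 * each step.
 */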
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

/* Added for PCI abort handling */
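/*
 * A config space access to a device that does not respond (e.g. while the
 * link is down) raises an imprecise external abort on i.MX6.  Returning 0
 * marks the abort as handled so such accesses do not take down the kernel.
 */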
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
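
/*
 * Hold the PCIe core in reset: assert the PCIE_TEST_PD power-down and
 * gate the reference clock through IOMUXC GPR1.
 */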
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);

	return 0;
}
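
/*
 * Bring the core out of reset: release the power-down, ungate the
 * reference clock, enable the PCIe clocks and pulse the optional reset
 * GPIO so the attached device starts from a known state.
 */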
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	int ret;

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value(imx6_pcie->reset_gpio, 0);
		msleep(100);
		gpio_set_value(imx6_pcie->reset_gpio, 1);
	}

	return 0;

err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	return ret;
}
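
/*
 * Static controller/PHY setup through the IOMUXC general purpose
 * registers: keep the LTSSM disabled, select root-complex mode and
 * program the LOS threshold, TX de-emphasis and swing levels.
 */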
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
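
/*
 * Poll dw_pcie_link_up() until the link is reported up, giving up after
 * 200 tries and dumping the PHY debug registers for diagnosis.
 */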
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
	int count = 200;

	while (!dw_pcie_link_up(pp)) {
		usleep_range(100, 1000);
		if (--count)
			continue;

		dev_err(pp->dev, "phy link never came up\n");
		dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
			readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
		return -EINVAL;
	}

	return 0;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}
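
/*
 * Bring the link up at Gen1 first, then raise the advertised maximum to
 * Gen2 and request a directed speed change once training has completed.
 */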
static int imx6_pcie_start_link(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	uint32_t tmp;
	int ret, count;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	writel(tmp, pp->dbi_base + PCIE_RC_LCR);

	/* Start LTSSM. */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(pp);
	if (ret)
		return ret;

	/* Allow Gen2 mode after the link is up. */
	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
	writel(tmp, pp->dbi_base + PCIE_RC_LCR);

	/*
	 * Start Directed Speed Change so the best possible speed both link
	 * partners support can be negotiated.
	 */
	tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	tmp |= PORT_LOGIC_SPEED_CHANGE;
	writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	count = 200;
	while (count--) {
		tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			break;
		usleep_range(100, 1000);
	}

	/* Make sure link training is finished as well! */
	if (count)
		ret = imx6_pcie_wait_for_link(pp);
	else
		ret = -EINVAL;

	if (ret) {
		dev_err(pp->dev, "Failed to bring link up!\n");
	} else {
		tmp = readl(pp->dbi_base + 0x80);
		dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
	}

	return ret;
}
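
/*
 * pcie_host_ops .host_init hook: run the full bring-up sequence when the
 * DesignWare core initializes the root complex.
 */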
static void imx6_pcie_host_init(struct pcie_port *pp)
{
	imx6_pcie_assert_core_reset(pp);

	imx6_pcie_init_phy(pp);

	imx6_pcie_deassert_core_reset(pp);

	dw_pcie_setup_rc(pp);

	imx6_pcie_start_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}
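
/*
 * Work around a stuck Gen2 speed change by briefly overriding the PHY
 * RX_DATA_EN and RX_PLL_EN inputs through PHY_RX_OVRD_IN_LO, then
 * releasing the override again.
 */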
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
	uint32_t temp;

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	usleep_range(2000, 3000);

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
	u32 rc, debug_r0, rx_valid;
	int count = 5;

	/*
	 * Test if the PHY reports that the link is up and also that the LTSSM
	 * training finished. There are three possible states of the link when
	 * this code is called:
	 * 1) The link is DOWN (unlikely)
	 *     The link didn't come up yet for some reason. This usually means
	 *     we have a real problem somewhere. Reset the PHY and exit. This
	 *     state calls for inspection of the DEBUG registers.
	 * 2) The link is UP, but still in LTSSM training
	 *     Wait for the training to finish, which should take a very short
	 *     time. If the training does not finish, we have a problem and we
	 *     need to inspect the DEBUG registers. If the training does finish,
	 *     the link is up and operating correctly.
	 * 3) The link is UP and no longer in LTSSM training
	 *     The link is up and operating correctly.
	 */
	while (1) {
		rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
		if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
			break;
		if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
			return 1;
		if (!count--)
			break;
		dev_dbg(pp->dev, "Link is up, but still in training\n");
		/*
		 * Wait a little bit, then re-check if the link finished
		 * the training.
		 */
		usleep_range(1000, 2000);
	}
	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);

	if (rx_valid & 0x01)
		return 0;

	if ((debug_r0 & 0x3f) != 0x0d)
		return 0;

	dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
	dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);

	imx6_pcie_reset_phy(pp);

	return 0;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
	.link_up = imx6_pcie_link_up,
	.host_init = imx6_pcie_host_init,
};
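
/*
 * Request the MSI interrupt (when CONFIG_PCI_MSI is enabled) and register
 * the port with the DesignWare host core.
 */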
static int __init imx6_add_pcie_port(struct pcie_port *pp,
			struct platform_device *pdev)
{
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(&pdev->dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED, "mx6-pcie-msi", pp);
		if (ret) {
			dev_err(&pdev->dev, "failed to request MSI irq\n");
			return -ENODEV;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
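
/*
 * Probe: map the controller (DBI) registers, look up the optional reset
 * GPIO, the three PCIe clocks and the IOMUXC GPR syscon, then register
 * the root port.
 */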
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie;
	struct pcie_port *pp;
	struct device_node *np = pdev->dev.of_node;
	struct resource *dbi_base;
	int ret;

	imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pp = &imx6_pcie->pp;
	pp->dev = &pdev->dev;

	/* Added for PCI abort handling */
	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
		"imprecise external abort");

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
					    GPIOF_OUT_INIT_LOW, "PCIe reset");
		if (ret) {
			dev_err(&pdev->dev, "unable to get reset gpio\n");
			return ret;
		}
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(&pdev->dev,
			"pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(&pdev->dev,
			"pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(&pdev->dev,
			"pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	ret = imx6_add_pcie_port(pp, pdev);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, imx6_pcie);
	return 0;
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.owner = THIS_MODULE,
		.of_match_table = imx6_pcie_of_match,
	},
};

/* Freescale PCIe driver does not allow module unload */

static int __init imx6_pcie_init(void)
{
	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
fs_initcall(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");