pci-imx6.c

/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

struct imx6_pcie {
	int			reset_gpio;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie;
	struct pcie_port	pp;
	struct regmap		*iomuxc_gpr;
	void __iomem		*mem_base;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
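
/* Poll the PHY control bus "ack" flag until it matches the expected value. */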
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}
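
/* Latch a PHY register address on the control bus and wait for the ack handshake. */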
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	return pcie_phy_poll_ack(dbi_base, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	return pcie_phy_poll_ack(dbi_base, 0);
}
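
/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */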
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}

static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	u32 val, gpr1, gpr12;

	/*
	 * If the bootloader already enabled the link we need some special
	 * handling to get the core back into a state where it is safe to
	 * touch it for configuration.  As there is no dedicated reset signal
	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
	 * state before completely disabling LTSSM, which is a prerequisite
	 * for core configuration.
	 *
	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
	 * indication that the bootloader activated the link.
	 */
	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);

	if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
	    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
		val = readl(pp->dbi_base + PCIE_PL_PFLR);
		val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
		val |= PCIE_PL_PFLR_FORCE_LINK;
		writel(val, pp->dbi_base + PCIE_PL_PFLR);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);

	return 0;
}
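
/* Enable the PCIe clocks, power up the PHY and, if present, toggle the reset GPIO. */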
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	int ret;

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	/* power up core phy and enable ref clock */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);

	/*
	 * The async reset input needs the ref clock to sync internally.
	 * If the ref clock only comes up after reset, the internally
	 * synced reset time is too short to meet the requirement, so
	 * add a ~10us delay here.
	 */
	udelay(10);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value(imx6_pcie->reset_gpio, 0);
		msleep(100);
		gpio_set_value(imx6_pcie->reset_gpio, 1);
	}
	return 0;

err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	return ret;
}
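
/* Configure the controller and PHY via the IOMUXC GPR registers: device type, LOS level, TX de-emphasis and swing. */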
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
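
/* Poll dw_pcie_link_up() for up to 200 iterations; dump the PHY debug registers on failure. */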
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		if (dw_pcie_link_up(pp))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(pp->dev, "phy link never came up\n");
	dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));

	return -EINVAL;
}
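
/* Wait for the port logic to clear the Directed Speed Change bit. */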
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
{
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(pp->dev, "Speed change timeout\n");
	return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

static int imx6_pcie_establish_link(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	writel(tmp, pp->dbi_base + PCIE_RC_LCR);

	/* Start LTSSM. */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(pp);
	if (ret)
		return ret;

	/* Allow Gen2 mode after the link is up. */
	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
	writel(tmp, pp->dbi_base + PCIE_RC_LCR);

	/*
	 * Start Directed Speed Change so the best possible speed both link
	 * partners support can be negotiated.
	 */
	tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	tmp |= PORT_LOGIC_SPEED_CHANGE;
	writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	ret = imx6_pcie_wait_for_speed_change(pp);
	if (ret) {
		dev_err(pp->dev, "Failed to bring link up!\n");
		return ret;
	}

	/* Make sure link training is finished as well! */
	ret = imx6_pcie_wait_for_link(pp);
	if (ret) {
		dev_err(pp->dev, "Failed to bring link up!\n");
		return ret;
	}

	tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
	dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
	return 0;
}
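
/* Host init callback: reset the core, set up the PHY and clocks, configure the RC and bring up the link. */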
static void imx6_pcie_host_init(struct pcie_port *pp)
{
	imx6_pcie_assert_core_reset(pp);

	imx6_pcie_init_phy(pp);

	imx6_pcie_deassert_core_reset(pp);

	dw_pcie_setup_rc(pp);

	imx6_pcie_establish_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}
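
/* Reset the PHY receiver by briefly asserting the RX_DATA_EN and RX_PLL_EN overrides. */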
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
	u32 tmp;

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
	u32 rc, debug_r0, rx_valid;
	int count = 5;

	/*
	 * Test if the PHY reports that the link is up and also that the LTSSM
	 * training finished.  There are three possible states of the link when
	 * this code is called:
	 * 1) The link is DOWN (unlikely)
	 *     The link didn't come up yet for some reason.  This usually means
	 *     we have a real problem somewhere.  Reset the PHY and exit.  This
	 *     state calls for inspection of the DEBUG registers.
	 * 2) The link is UP, but still in LTSSM training
	 *     Wait for the training to finish, which should take a very short
	 *     time.  If the training does not finish, we have a problem and we
	 *     need to inspect the DEBUG registers.  If the training does
	 *     finish, the link is up and operating correctly.
	 * 3) The link is UP and no longer in LTSSM training
	 *     The link is up and operating correctly.
	 */
	while (1) {
		rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
		if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
			break;
		if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
			return 1;
		if (!count--)
			break;
		dev_dbg(pp->dev, "Link is up, but still in training\n");
		/*
		 * Wait a little bit, then re-check if the link finished
		 * the training.
		 */
		usleep_range(1000, 2000);
	}
	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);

	if (rx_valid & 0x01)
		return 0;

	if ((debug_r0 & 0x3f) != 0x0d)
		return 0;

	dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
	dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);

	imx6_pcie_reset_phy(pp);

	return 0;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
	.link_up = imx6_pcie_link_up,
	.host_init = imx6_pcie_host_init,
};
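
/* Request the MSI IRQ (when CONFIG_PCI_MSI is enabled) and initialize the DesignWare host. */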
static int __init imx6_add_pcie_port(struct pcie_port *pp,
			struct platform_device *pdev)
{
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(&pdev->dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED, "mx6-pcie-msi", pp);
		if (ret) {
			dev_err(&pdev->dev, "failed to request MSI irq\n");
			return -ENODEV;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
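
/* Map the DBI registers, fetch the reset GPIO, clocks and IOMUXC GPR regmap, then register the port. */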
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie;
	struct pcie_port *pp;
	struct device_node *np = pdev->dev.of_node;
	struct resource *dbi_base;
	int ret;

	imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pp = &imx6_pcie->pp;
	pp->dev = &pdev->dev;

	/* Added for PCI abort handling */
	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
		"imprecise external abort");

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
					    GPIOF_OUT_INIT_LOW, "PCIe reset");
		if (ret) {
			dev_err(&pdev->dev, "unable to get reset gpio\n");
			return ret;
		}
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(&pdev->dev,
			"pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(&pdev->dev,
			"pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(&pdev->dev,
			"pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	ret = imx6_add_pcie_port(pp, pdev);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, imx6_pcie);
	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
	},
	.shutdown = imx6_pcie_shutdown,
};

/* Freescale PCIe driver does not allow module unload */

static int __init imx6_pcie_init(void)
{
	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
module_init(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");