pci-imx6.c

/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

struct imx6_pcie {
        int reset_gpio;
        struct clk *pcie_bus;
        struct clk *pcie_phy;
        struct clk *pcie;
        struct pcie_port pp;
        struct regmap *iomuxc_gpr;
        void __iomem *mem_base;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
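
/*
 * Poll the PHY control bridge for the acknowledge bit in PCIE_PHY_STAT.
 * Returns 0 once the ack bit matches exp_val, or -ETIMEDOUT after ten
 * 1us polling iterations.
 */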
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
        u32 val;
        u32 max_iterations = 10;
        u32 wait_counter = 0;

        do {
                val = readl(dbi_base + PCIE_PHY_STAT);
                val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
                wait_counter++;

                if (val == exp_val)
                        return 0;

                udelay(1);
        } while (wait_counter < max_iterations);

        return -ETIMEDOUT;
}
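
/*
 * Latch a PHY register address into the control bridge: write the address,
 * raise the "capture address" bit, then wait for the ack to assert and
 * de-assert again.
 */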
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
        u32 val;
        int ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
        u32 val, phy_ctl;
        int ret;

        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        /* assert Read signal */
        phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
        writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = readl(dbi_base + PCIE_PHY_STAT);
        *data = val & 0xffff;

        /* deassert Read signal */
        writel(0x00, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}
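
/*
 * Write to the 16-bit PCIe PHY control registers (not memory-mapped).
 * The sequence mirrors pcie_phy_read(): latch the address, capture the
 * data word, then pulse the write strobe, waiting for an ack after each
 * step.
 */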
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
        u32 var;
        int ret;

        /* write addr */
        /* cap addr */
        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* capture data */
        var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
        writel(var, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert cap data */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        /* assert wr signal */
        var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack */
        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert wr signal */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        writel(0x0, dbi_base + PCIE_PHY_CTRL);

        return 0;
}

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
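
/*
 * Put the PCIe core back into reset: assert the test power-down bit and
 * disable the reference clock in IOMUXC GPR1.  If the bootloader left
 * the link running, force LTSSM into "detect" first (see the comment in
 * the function body).
 */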
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        u32 val, gpr1, gpr12;

        /*
         * If the bootloader already enabled the link we need some special
         * handling to get the core back into a state where it is safe to
         * touch it for configuration.  As there is no dedicated reset signal
         * wired up for MX6QDL, we need to manually force LTSSM into "detect"
         * state before completely disabling LTSSM, which is a prerequisite
         * for core configuration.
         *
         * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
         * indication that the bootloader activated the link.
         */
        regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
        regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);

        if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
            (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
                val = readl(pp->dbi_base + PCIE_PL_PFLR);
                val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
                val |= PCIE_PL_PFLR_FORCE_LINK;
                writel(val, pp->dbi_base + PCIE_PL_PFLR);

                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                                IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
        }

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);

        return 0;
}
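
/*
 * Bring the core out of reset: enable the pcie_phy, pcie_bus and pcie
 * clocks, release the test power-down bit, re-enable the reference
 * clock, then toggle the optional reset GPIO to reset the downstream
 * device.
 */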
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        int ret;

        ret = clk_prepare_enable(imx6_pcie->pcie_phy);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_phy clock\n");
                goto err_pcie_phy;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie_bus);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_bus clock\n");
                goto err_pcie_bus;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie clock\n");
                goto err_pcie;
        }

        /* power up core phy and enable ref clock */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
        /*
         * The async reset input needs the ref clock to sync internally.
         * When the ref clock comes after reset, the internally synced
         * reset time is too short to meet the requirement, so add a
         * ~10us delay here.
         */
        udelay(10);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

        /* allow the clocks to stabilize */
        usleep_range(200, 500);

        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                gpio_set_value(imx6_pcie->reset_gpio, 0);
                msleep(100);
                gpio_set_value(imx6_pcie->reset_gpio, 1);
        }
        return 0;

err_pcie:
        clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
        clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
        return ret;
}
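
/*
 * Program the IOMUXC GPR12/GPR8 fields that feed constant configuration
 * inputs to the PCIe controller and PHY: device type (Root Port), LOS
 * level, TX de-emphasis and TX swing values.
 */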
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

        /* configure constant input signal to the pcie ctrl and phy */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
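
/*
 * Wait for the DesignWare core to report that the PHY link is up, polling
 * up to 200 times with a 100us-1ms sleep between attempts; dump the PHY
 * debug registers and return -EINVAL on timeout.
 */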
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
        int count = 200;

        while (!dw_pcie_link_up(pp)) {
                usleep_range(100, 1000);
                if (--count)
                        continue;

                dev_err(pp->dev, "phy link never came up\n");
                dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
                        readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
                        readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
                return -EINVAL;
        }

        return 0;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
        struct pcie_port *pp = arg;

        return dw_handle_msi_irq(pp);
}
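
/*
 * Bring up the link: limit the Root Complex to Gen1, enable LTSSM via
 * GPR12, wait for the link, then raise the limit to Gen2 and trigger a
 * Directed Speed Change so the link can retrain at the highest speed
 * both partners support.
 */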
static int imx6_pcie_start_link(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        uint32_t tmp;
        int ret, count;

        /*
         * Force Gen1 operation when starting the link.  In case the link is
         * started in Gen2 mode, there is a possibility the devices on the
         * bus will not be detected at all.  This happens with PCIe switches.
         */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
        tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
        tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
        writel(tmp, pp->dbi_base + PCIE_RC_LCR);

        /* Start LTSSM. */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

        ret = imx6_pcie_wait_for_link(pp);
        if (ret)
                return ret;

        /* Allow Gen2 mode after the link is up. */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
        tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
        tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
        writel(tmp, pp->dbi_base + PCIE_RC_LCR);

        /*
         * Start Directed Speed Change so the best possible speed both link
         * partners support can be negotiated.
         */
        tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
        tmp |= PORT_LOGIC_SPEED_CHANGE;
        writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

        count = 200;
        while (count--) {
                tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
                /* Test if the speed change finished. */
                if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
                        break;
                usleep_range(100, 1000);
        }

        /* Make sure link training is finished as well! */
        if (count)
                ret = imx6_pcie_wait_for_link(pp);
        else
                ret = -EINVAL;

        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
        } else {
                tmp = readl(pp->dbi_base + 0x80);
                dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
        }

        return ret;
}
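
/*
 * pcie_host_ops .host_init callback: cycle the core through reset, set up
 * the PHY, configure the DesignWare Root Complex, start the link, and
 * initialize MSI support when it is enabled.
 */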
static void imx6_pcie_host_init(struct pcie_port *pp)
{
        imx6_pcie_assert_core_reset(pp);

        imx6_pcie_init_phy(pp);

        imx6_pcie_deassert_core_reset(pp);

        dw_pcie_setup_rc(pp);

        imx6_pcie_start_link(pp);

        if (IS_ENABLED(CONFIG_PCI_MSI))
                dw_pcie_msi_init(pp);
}
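
/*
 * Pulse the receiver overrides in PHY_RX_OVRD_IN_LO (RX_DATA_EN and
 * RX_PLL_EN); used by imx6_pcie_link_up() to reset the PHY receiver
 * when the transition to Gen2 appears stuck.
 */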
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
        uint32_t temp;

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);

        usleep_range(2000, 3000);

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
        u32 rc, debug_r0, rx_valid;
        int count = 5;

        /*
         * Test if the PHY reports that the link is up and also that the LTSSM
         * training finished.  There are three possible states of the link when
         * this code is called:
         * 1) The link is DOWN (unlikely)
         *     The link didn't come up yet for some reason.  This usually means
         *     we have a real problem somewhere.  Reset the PHY and exit.  This
         *     state calls for inspection of the DEBUG registers.
         * 2) The link is UP, but still in LTSSM training
         *     Wait for the training to finish, which should take a very short
         *     time.  If the training does not finish, we have a problem and we
         *     need to inspect the DEBUG registers.  If the training does
         *     finish, the link is up and operating correctly.
         * 3) The link is UP and no longer in LTSSM training
         *     The link is up and operating correctly.
         */
        while (1) {
                rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
                if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
                        break;
                if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
                        return 1;
                if (!count--)
                        break;
                dev_dbg(pp->dev, "Link is up, but still in training\n");
                /*
                 * Wait a little bit, then re-check if the link finished
                 * the training.
                 */
                usleep_range(1000, 2000);
        }
        /*
         * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
         * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
         * If (MAC/LTSSM.state == Recovery.RcvrLock)
         * && (PHY/rx_valid==0) then pulse PHY/rx_reset.  Transition
         * to gen2 is stuck.
         */
        pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
        debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);

        if (rx_valid & 0x01)
                return 0;

        if ((debug_r0 & 0x3f) != 0x0d)
                return 0;

        dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
        dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);

        imx6_pcie_reset_phy(pp);

        return 0;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
        .link_up = imx6_pcie_link_up,
        .host_init = imx6_pcie_host_init,
};
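
/*
 * Request the MSI interrupt (when CONFIG_PCI_MSI is enabled) and register
 * the port with the DesignWare host core.
 */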
static int __init imx6_add_pcie_port(struct pcie_port *pp,
                        struct platform_device *pdev)
{
        int ret;

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                pp->msi_irq = platform_get_irq_byname(pdev, "msi");
                if (pp->msi_irq <= 0) {
                        dev_err(&pdev->dev, "failed to get MSI irq\n");
                        return -ENODEV;
                }

                ret = devm_request_irq(&pdev->dev, pp->msi_irq,
                                       imx6_pcie_msi_handler,
                                       IRQF_SHARED, "mx6-pcie-msi", pp);
                if (ret) {
                        dev_err(&pdev->dev, "failed to request MSI irq\n");
                        return -ENODEV;
                }
        }

        pp->root_bus_nr = -1;
        pp->ops = &imx6_pcie_host_ops;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(&pdev->dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}
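
/*
 * Probe: map the DBI registers, install the imprecise-abort fault hook,
 * look up the optional reset GPIO, the three clocks and the IOMUXC GPR
 * syscon regmap, then add the PCIe port.
 */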
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
        struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        int ret;

        imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
        if (!imx6_pcie)
                return -ENOMEM;

        pp = &imx6_pcie->pp;
        pp->dev = &pdev->dev;

        /* Added for PCI abort handling */
        hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
                "imprecise external abort");

        dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
        if (IS_ERR(pp->dbi_base))
                return PTR_ERR(pp->dbi_base);

        /* Fetch GPIOs */
        imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
                                            GPIOF_OUT_INIT_LOW, "PCIe reset");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get reset gpio\n");
                        return ret;
                }
        }

        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
        if (IS_ERR(imx6_pcie->pcie_phy)) {
                dev_err(&pdev->dev,
                        "pcie_phy clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_phy);
        }

        imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
        if (IS_ERR(imx6_pcie->pcie_bus)) {
                dev_err(&pdev->dev,
                        "pcie_bus clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_bus);
        }

        imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
        if (IS_ERR(imx6_pcie->pcie)) {
                dev_err(&pdev->dev,
                        "pcie clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie);
        }

        /* Grab GPR config register range */
        imx6_pcie->iomuxc_gpr =
                 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
                dev_err(&pdev->dev, "unable to find iomuxc registers\n");
                return PTR_ERR(imx6_pcie->iomuxc_gpr);
        }

        ret = imx6_add_pcie_port(pp, pdev);
        if (ret < 0)
                return ret;

        platform_set_drvdata(pdev, imx6_pcie);
        return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

        /* bring down link, so bootloader gets clean state in case of reboot */
        imx6_pcie_assert_core_reset(&imx6_pcie->pp);
}

static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", },
        {},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
        .driver = {
                .name = "imx6q-pcie",
                .of_match_table = imx6_pcie_of_match,
        },
        .shutdown = imx6_pcie_shutdown,
};

/* Freescale PCIe driver does not allow module unload */
static int __init imx6_pcie_init(void)
{
        return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
module_init(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");